Diffstat (limited to 'contrib/llvm/lib/Target/SystemZ')
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp  806
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp  323
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp  165
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h  69
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp  126
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp  36
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h  30
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp  187
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h  31
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp  140
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp  229
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h  96
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/README.txt  181
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZ.h  115
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZ.td  72
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp  239
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h  52
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.cpp  21
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h  23
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td  65
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp  62
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h  55
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp  471
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp  521
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h  78
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp  1148
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp  3248
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h  319
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h  48
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td  366
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td  1603
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp  1247
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h  257
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td  1393
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp  462
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp  100
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h  44
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp  17
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h  68
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZOperands.td  482
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZOperators.td  378
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td  152
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td  46
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp  141
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h  68
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td  167
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp  293
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h  80
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp  163
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp  67
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h  70
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp  108
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h  74
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp  20
54 files changed, 16822 insertions, 0 deletions
diff --git a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
new file mode 100644
index 000000000000..763f40c7ff44
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -0,0 +1,806 @@
+//===-- SystemZAsmParser.cpp - Parse SystemZ assembly instructions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+// Return true if Expr is in the range [MinValue, MaxValue].
+static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) {
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ int64_t Value = CE->getValue();
+ return Value >= MinValue && Value <= MaxValue;
+ }
+ return false;
+}
+
+namespace {
+enum RegisterKind {
+ GR32Reg,
+ GRH32Reg,
+ GR64Reg,
+ GR128Reg,
+ ADDR32Reg,
+ ADDR64Reg,
+ FP32Reg,
+ FP64Reg,
+ FP128Reg
+};
+
+enum MemoryKind {
+ BDMem,
+ BDXMem,
+ BDLMem
+};
+
+class SystemZOperand : public MCParsedAsmOperand {
+public:
+private:
+ enum OperandKind {
+ KindInvalid,
+ KindToken,
+ KindReg,
+ KindAccessReg,
+ KindImm,
+ KindMem
+ };
+
+ OperandKind Kind;
+ SMLoc StartLoc, EndLoc;
+
+ // A string of length Length, starting at Data.
+ struct TokenOp {
+ const char *Data;
+ unsigned Length;
+ };
+
+ // LLVM register Num, which has kind Kind. In some ways it might be
+ // easier for this class to have a register bank (general, floating-point
+ // or access) and a raw register number (0-15). This would postpone the
+ // interpretation of the operand to the add*() methods and avoid the need
+ // for context-dependent parsing. However, we do things the current way
+ // because of the virtual getReg() method, which needs to distinguish
+ // between (say) %r0 used as a single register and %r0 used as a pair.
+ // Context-dependent parsing can also give us slightly better error
+ // messages when invalid pairs like %r1 are used.
+ struct RegOp {
+ RegisterKind Kind;
+ unsigned Num;
+ };
+
+ // Base + Disp + Index, where Base and Index are LLVM registers or 0.
+ // RegKind says what type the registers have (ADDR32Reg or ADDR64Reg).
+ // Length is the operand length for D(L,B)-style operands, otherwise
+ // it is null.
+ struct MemOp {
+ unsigned Base : 8;
+ unsigned Index : 8;
+ unsigned RegKind : 8;
+ unsigned Unused : 8;
+ const MCExpr *Disp;
+ const MCExpr *Length;
+ };
+
+ union {
+ TokenOp Token;
+ RegOp Reg;
+ unsigned AccessReg;
+ const MCExpr *Imm;
+ MemOp Mem;
+ };
+
+ SystemZOperand(OperandKind kind, SMLoc startLoc, SMLoc endLoc)
+ : Kind(kind), StartLoc(startLoc), EndLoc(endLoc)
+ {}
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const {
+ // Add as immediates when possible. Null MCExpr = 0.
+ if (Expr == 0)
+ Inst.addOperand(MCOperand::CreateImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::CreateExpr(Expr));
+ }
+
+public:
+ // Create particular kinds of operand.
+ static SystemZOperand *createInvalid(SMLoc StartLoc, SMLoc EndLoc) {
+ return new SystemZOperand(KindInvalid, StartLoc, EndLoc);
+ }
+ static SystemZOperand *createToken(StringRef Str, SMLoc Loc) {
+ SystemZOperand *Op = new SystemZOperand(KindToken, Loc, Loc);
+ Op->Token.Data = Str.data();
+ Op->Token.Length = Str.size();
+ return Op;
+ }
+ static SystemZOperand *createReg(RegisterKind Kind, unsigned Num,
+ SMLoc StartLoc, SMLoc EndLoc) {
+ SystemZOperand *Op = new SystemZOperand(KindReg, StartLoc, EndLoc);
+ Op->Reg.Kind = Kind;
+ Op->Reg.Num = Num;
+ return Op;
+ }
+ static SystemZOperand *createAccessReg(unsigned Num, SMLoc StartLoc,
+ SMLoc EndLoc) {
+ SystemZOperand *Op = new SystemZOperand(KindAccessReg, StartLoc, EndLoc);
+ Op->AccessReg = Num;
+ return Op;
+ }
+ static SystemZOperand *createImm(const MCExpr *Expr, SMLoc StartLoc,
+ SMLoc EndLoc) {
+ SystemZOperand *Op = new SystemZOperand(KindImm, StartLoc, EndLoc);
+ Op->Imm = Expr;
+ return Op;
+ }
+ static SystemZOperand *createMem(RegisterKind RegKind, unsigned Base,
+ const MCExpr *Disp, unsigned Index,
+ const MCExpr *Length, SMLoc StartLoc,
+ SMLoc EndLoc) {
+ SystemZOperand *Op = new SystemZOperand(KindMem, StartLoc, EndLoc);
+ Op->Mem.RegKind = RegKind;
+ Op->Mem.Base = Base;
+ Op->Mem.Index = Index;
+ Op->Mem.Disp = Disp;
+ Op->Mem.Length = Length;
+ return Op;
+ }
+
+ // Token operands
+ virtual bool isToken() const LLVM_OVERRIDE {
+ return Kind == KindToken;
+ }
+ StringRef getToken() const {
+ assert(Kind == KindToken && "Not a token");
+ return StringRef(Token.Data, Token.Length);
+ }
+
+ // Register operands.
+ virtual bool isReg() const LLVM_OVERRIDE {
+ return Kind == KindReg;
+ }
+ bool isReg(RegisterKind RegKind) const {
+ return Kind == KindReg && Reg.Kind == RegKind;
+ }
+ virtual unsigned getReg() const LLVM_OVERRIDE {
+ assert(Kind == KindReg && "Not a register");
+ return Reg.Num;
+ }
+
+ // Access register operands. Access registers aren't exposed to LLVM
+ // as registers.
+ bool isAccessReg() const {
+ return Kind == KindAccessReg;
+ }
+
+ // Immediate operands.
+ virtual bool isImm() const LLVM_OVERRIDE {
+ return Kind == KindImm;
+ }
+ bool isImm(int64_t MinValue, int64_t MaxValue) const {
+ return Kind == KindImm && inRange(Imm, MinValue, MaxValue);
+ }
+ const MCExpr *getImm() const {
+ assert(Kind == KindImm && "Not an immediate");
+ return Imm;
+ }
+
+ // Memory operands.
+ virtual bool isMem() const LLVM_OVERRIDE {
+ return Kind == KindMem;
+ }
+ bool isMem(RegisterKind RegKind, MemoryKind MemKind) const {
+ return (Kind == KindMem &&
+ Mem.RegKind == RegKind &&
+ (MemKind == BDXMem || !Mem.Index) &&
+ (MemKind == BDLMem) == (Mem.Length != 0));
+ }
+ bool isMemDisp12(RegisterKind RegKind, MemoryKind MemKind) const {
+ return isMem(RegKind, MemKind) && inRange(Mem.Disp, 0, 0xfff);
+ }
+ bool isMemDisp20(RegisterKind RegKind, MemoryKind MemKind) const {
+ return isMem(RegKind, MemKind) && inRange(Mem.Disp, -524288, 524287);
+ }
+ bool isMemDisp12Len8(RegisterKind RegKind) const {
+ return isMemDisp12(RegKind, BDLMem) && inRange(Mem.Length, 1, 0x100);
+ }
+
+ // Override MCParsedAsmOperand.
+ virtual SMLoc getStartLoc() const LLVM_OVERRIDE { return StartLoc; }
+ virtual SMLoc getEndLoc() const LLVM_OVERRIDE { return EndLoc; }
+ virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
+
+ // Used by the TableGen code to add particular types of operand
+ // to an instruction.
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands");
+ Inst.addOperand(MCOperand::CreateReg(getReg()));
+ }
+ void addAccessRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands");
+ assert(Kind == KindAccessReg && "Invalid operand type");
+ Inst.addOperand(MCOperand::CreateImm(AccessReg));
+ }
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands");
+ addExpr(Inst, getImm());
+ }
+ void addBDAddrOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands");
+ assert(Kind == KindMem && Mem.Index == 0 && "Invalid operand type");
+ Inst.addOperand(MCOperand::CreateReg(Mem.Base));
+ addExpr(Inst, Mem.Disp);
+ }
+ void addBDXAddrOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 3 && "Invalid number of operands");
+ assert(Kind == KindMem && "Invalid operand type");
+ Inst.addOperand(MCOperand::CreateReg(Mem.Base));
+ addExpr(Inst, Mem.Disp);
+ Inst.addOperand(MCOperand::CreateReg(Mem.Index));
+ }
+ void addBDLAddrOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 3 && "Invalid number of operands");
+ assert(Kind == KindMem && "Invalid operand type");
+ Inst.addOperand(MCOperand::CreateReg(Mem.Base));
+ addExpr(Inst, Mem.Disp);
+ addExpr(Inst, Mem.Length);
+ }
+
+ // Used by the TableGen code to check for particular operand types.
+ bool isGR32() const { return isReg(GR32Reg); }
+ bool isGRH32() const { return isReg(GRH32Reg); }
+ bool isGRX32() const { return false; }
+ bool isGR64() const { return isReg(GR64Reg); }
+ bool isGR128() const { return isReg(GR128Reg); }
+ bool isADDR32() const { return isReg(ADDR32Reg); }
+ bool isADDR64() const { return isReg(ADDR64Reg); }
+ bool isADDR128() const { return false; }
+ bool isFP32() const { return isReg(FP32Reg); }
+ bool isFP64() const { return isReg(FP64Reg); }
+ bool isFP128() const { return isReg(FP128Reg); }
+ bool isBDAddr32Disp12() const { return isMemDisp12(ADDR32Reg, BDMem); }
+ bool isBDAddr32Disp20() const { return isMemDisp20(ADDR32Reg, BDMem); }
+ bool isBDAddr64Disp12() const { return isMemDisp12(ADDR64Reg, BDMem); }
+ bool isBDAddr64Disp20() const { return isMemDisp20(ADDR64Reg, BDMem); }
+ bool isBDXAddr64Disp12() const { return isMemDisp12(ADDR64Reg, BDXMem); }
+ bool isBDXAddr64Disp20() const { return isMemDisp20(ADDR64Reg, BDXMem); }
+ bool isBDLAddr64Disp12Len8() const { return isMemDisp12Len8(ADDR64Reg); }
+ bool isU4Imm() const { return isImm(0, 15); }
+ bool isU6Imm() const { return isImm(0, 63); }
+ bool isU8Imm() const { return isImm(0, 255); }
+ bool isS8Imm() const { return isImm(-128, 127); }
+ bool isU16Imm() const { return isImm(0, 65535); }
+ bool isS16Imm() const { return isImm(-32768, 32767); }
+ bool isU32Imm() const { return isImm(0, (1LL << 32) - 1); }
+ bool isS32Imm() const { return isImm(-(1LL << 31), (1LL << 31) - 1); }
+};
+
+class SystemZAsmParser : public MCTargetAsmParser {
+#define GET_ASSEMBLER_HEADER
+#include "SystemZGenAsmMatcher.inc"
+
+private:
+ MCSubtargetInfo &STI;
+ MCAsmParser &Parser;
+ enum RegisterGroup {
+ RegGR,
+ RegFP,
+ RegAccess
+ };
+ struct Register {
+ RegisterGroup Group;
+ unsigned Num;
+ SMLoc StartLoc, EndLoc;
+ };
+
+ bool parseRegister(Register &Reg);
+
+ bool parseRegister(Register &Reg, RegisterGroup Group, const unsigned *Regs,
+ bool IsAddress = false);
+
+ OperandMatchResultTy
+ parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ RegisterGroup Group, const unsigned *Regs, RegisterKind Kind);
+
+ bool parseAddress(unsigned &Base, const MCExpr *&Disp,
+ unsigned &Index, const MCExpr *&Length,
+ const unsigned *Regs, RegisterKind RegKind);
+
+ OperandMatchResultTy
+ parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ const unsigned *Regs, RegisterKind RegKind,
+ MemoryKind MemKind);
+
+ bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ StringRef Mnemonic);
+
+public:
+ SystemZAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(sti), Parser(parser) {
+ MCAsmParserExtension::Initialize(Parser);
+
+ // Initialize the set of available features.
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ }
+
+ // Override MCTargetAsmParser.
+ virtual bool ParseDirective(AsmToken DirectiveID) LLVM_OVERRIDE;
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) LLVM_OVERRIDE;
+ virtual bool ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands)
+ LLVM_OVERRIDE;
+ virtual bool
+ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) LLVM_OVERRIDE;
+
+ // Used by the TableGen code to parse particular operand types.
+ OperandMatchResultTy
+ parseGR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, GR32Reg);
+ }
+ OperandMatchResultTy
+ parseGRH32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GRH32Regs, GRH32Reg);
+ }
+ OperandMatchResultTy
+ parseGRX32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ llvm_unreachable("GRX32 should only be used for pseudo instructions");
+ }
+ OperandMatchResultTy
+ parseGR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, GR64Reg);
+ }
+ OperandMatchResultTy
+ parseGR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GR128Regs, GR128Reg);
+ }
+ OperandMatchResultTy
+ parseADDR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, ADDR32Reg);
+ }
+ OperandMatchResultTy
+ parseADDR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, ADDR64Reg);
+ }
+ OperandMatchResultTy
+ parseADDR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ llvm_unreachable("Shouldn't be used as an operand");
+ }
+ OperandMatchResultTy
+ parseFP32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegFP, SystemZMC::FP32Regs, FP32Reg);
+ }
+ OperandMatchResultTy
+ parseFP64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegFP, SystemZMC::FP64Regs, FP64Reg);
+ }
+ OperandMatchResultTy
+ parseFP128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseRegister(Operands, RegFP, SystemZMC::FP128Regs, FP128Reg);
+ }
+ OperandMatchResultTy
+ parseBDAddr32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseAddress(Operands, SystemZMC::GR32Regs, ADDR32Reg, BDMem);
+ }
+ OperandMatchResultTy
+ parseBDAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDMem);
+ }
+ OperandMatchResultTy
+ parseBDXAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDXMem);
+ }
+ OperandMatchResultTy
+ parseBDLAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDLMem);
+ }
+ OperandMatchResultTy
+ parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ OperandMatchResultTy
+ parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ int64_t MinVal, int64_t MaxVal);
+ OperandMatchResultTy
+ parsePCRel16(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1);
+ }
+ OperandMatchResultTy
+ parsePCRel32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1);
+ }
+};
+}
+
+#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
+#define GET_MATCHER_IMPLEMENTATION
+#include "SystemZGenAsmMatcher.inc"
+
+void SystemZOperand::print(raw_ostream &OS) const {
+ llvm_unreachable("Not implemented");
+}
+
+// Parse one register of the form %<prefix><number>.
+bool SystemZAsmParser::parseRegister(Register &Reg) {
+ Reg.StartLoc = Parser.getTok().getLoc();
+
+ // Eat the % prefix.
+ if (Parser.getTok().isNot(AsmToken::Percent))
+ return Error(Parser.getTok().getLoc(), "register expected");
+ Parser.Lex();
+
+ // Expect a register name.
+ if (Parser.getTok().isNot(AsmToken::Identifier))
+ return Error(Reg.StartLoc, "invalid register");
+
+ // Check that there's a prefix.
+ StringRef Name = Parser.getTok().getString();
+ if (Name.size() < 2)
+ return Error(Reg.StartLoc, "invalid register");
+ char Prefix = Name[0];
+
+ // Treat the rest of the register name as a register number.
+ if (Name.substr(1).getAsInteger(10, Reg.Num))
+ return Error(Reg.StartLoc, "invalid register");
+
+ // Look for valid combinations of prefix and number.
+ if (Prefix == 'r' && Reg.Num < 16)
+ Reg.Group = RegGR;
+ else if (Prefix == 'f' && Reg.Num < 16)
+ Reg.Group = RegFP;
+ else if (Prefix == 'a' && Reg.Num < 16)
+ Reg.Group = RegAccess;
+ else
+ return Error(Reg.StartLoc, "invalid register");
+
+ Reg.EndLoc = Parser.getTok().getLoc();
+ Parser.Lex();
+ return false;
+}
+
+// Parse a register of group Group. If Regs is nonnull, use it to map
+// the raw register number to LLVM numbering, with zero entries indicating
+// an invalid register. IsAddress says whether the register appears in an
+// address context.
+bool SystemZAsmParser::parseRegister(Register &Reg, RegisterGroup Group,
+ const unsigned *Regs, bool IsAddress) {
+ if (parseRegister(Reg))
+ return true;
+ if (Reg.Group != Group)
+ return Error(Reg.StartLoc, "invalid operand for instruction");
+ if (Regs && Regs[Reg.Num] == 0)
+ return Error(Reg.StartLoc, "invalid register pair");
+ if (Reg.Num == 0 && IsAddress)
+ return Error(Reg.StartLoc, "%r0 used in an address");
+ if (Regs)
+ Reg.Num = Regs[Reg.Num];
+ return false;
+}
+
+// Parse a register and add it to Operands. The other arguments are as above.
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ RegisterGroup Group, const unsigned *Regs,
+ RegisterKind Kind) {
+ if (Parser.getTok().isNot(AsmToken::Percent))
+ return MatchOperand_NoMatch;
+
+ Register Reg;
+ bool IsAddress = (Kind == ADDR32Reg || Kind == ADDR64Reg);
+ if (parseRegister(Reg, Group, Regs, IsAddress))
+ return MatchOperand_ParseFail;
+
+ Operands.push_back(SystemZOperand::createReg(Kind, Reg.Num,
+ Reg.StartLoc, Reg.EndLoc));
+ return MatchOperand_Success;
+}
+
+// Parse a memory operand into Base, Disp, Index and Length.
+// Regs maps asm register numbers to LLVM register numbers and RegKind
+// says what kind of address register we're using (ADDR32Reg or ADDR64Reg).
+bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp,
+ unsigned &Index, const MCExpr *&Length,
+ const unsigned *Regs,
+ RegisterKind RegKind) {
+ // Parse the displacement, which must always be present.
+ if (getParser().parseExpression(Disp))
+ return true;
+
+ // Parse the optional base and index.
+ Index = 0;
+ Base = 0;
+ Length = 0;
+ if (getLexer().is(AsmToken::LParen)) {
+ Parser.Lex();
+
+ if (getLexer().is(AsmToken::Percent)) {
+ // Parse the first register and decide whether it's a base or an index.
+ Register Reg;
+ if (parseRegister(Reg, RegGR, Regs, RegKind))
+ return true;
+ if (getLexer().is(AsmToken::Comma))
+ Index = Reg.Num;
+ else
+ Base = Reg.Num;
+ } else {
+ // Parse the length.
+ if (getParser().parseExpression(Length))
+ return true;
+ }
+
+ // Check whether there's a second register. It's the base if so.
+ if (getLexer().is(AsmToken::Comma)) {
+ Parser.Lex();
+ Register Reg;
+ if (parseRegister(Reg, RegGR, Regs, RegKind))
+ return true;
+ Base = Reg.Num;
+ }
+
+ // Consume the closing bracket.
+ if (getLexer().isNot(AsmToken::RParen))
+ return Error(Parser.getTok().getLoc(), "unexpected token in address");
+ Parser.Lex();
+ }
+ return false;
+}
+
+// Parse a memory operand and add it to Operands. The other arguments
+// are as above.
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ const unsigned *Regs, RegisterKind RegKind,
+ MemoryKind MemKind) {
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ unsigned Base, Index;
+ const MCExpr *Disp;
+ const MCExpr *Length;
+ if (parseAddress(Base, Disp, Index, Length, Regs, RegKind))
+ return MatchOperand_ParseFail;
+
+ if (Index && MemKind != BDXMem) {
+ Error(StartLoc, "invalid use of indexed addressing");
+ return MatchOperand_ParseFail;
+ }
+
+ if (Length && MemKind != BDLMem) {
+ Error(StartLoc, "invalid use of length addressing");
+ return MatchOperand_ParseFail;
+ }
+
+ if (!Length && MemKind == BDLMem) {
+ Error(StartLoc, "missing length in address");
+ return MatchOperand_ParseFail;
+ }
+
+ SMLoc EndLoc =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(SystemZOperand::createMem(RegKind, Base, Disp, Index,
+ Length, StartLoc, EndLoc));
+ return MatchOperand_Success;
+}
+
+bool SystemZAsmParser::ParseDirective(AsmToken DirectiveID) {
+ return true;
+}
+
+bool SystemZAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+ Register Reg;
+ if (parseRegister(Reg))
+ return true;
+ if (Reg.Group == RegGR)
+ RegNo = SystemZMC::GR64Regs[Reg.Num];
+ else if (Reg.Group == RegFP)
+ RegNo = SystemZMC::FP64Regs[Reg.Num];
+ else
+ // FIXME: Access registers aren't modelled as LLVM registers yet.
+ return Error(Reg.StartLoc, "invalid operand for instruction");
+ StartLoc = Reg.StartLoc;
+ EndLoc = Reg.EndLoc;
+ return false;
+}
+
+bool SystemZAsmParser::
+ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ Operands.push_back(SystemZOperand::createToken(Name, NameLoc));
+
+ // Read the remaining operands.
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ // Read the first operand.
+ if (parseOperand(Operands, Name)) {
+ Parser.eatToEndOfStatement();
+ return true;
+ }
+
+ // Read any subsequent operands.
+ while (getLexer().is(AsmToken::Comma)) {
+ Parser.Lex();
+ if (parseOperand(Operands, Name)) {
+ Parser.eatToEndOfStatement();
+ return true;
+ }
+ }
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ SMLoc Loc = getLexer().getLoc();
+ Parser.eatToEndOfStatement();
+ return Error(Loc, "unexpected token in argument list");
+ }
+ }
+
+ // Consume the EndOfStatement.
+ Parser.Lex();
+ return false;
+}
+
+bool SystemZAsmParser::
+parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ StringRef Mnemonic) {
+ // Check whether the current operand has a custom associated parser. If so,
+ // try to custom-parse the operand; otherwise fall back to the general
+ // approach below.
+ OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+ if (ResTy == MatchOperand_Success)
+ return false;
+
+ // If there wasn't a custom match, try the generic matcher below. Otherwise,
+ // there was a match, but an error occurred, in which case, just return that
+ // the operand parsing failed.
+ if (ResTy == MatchOperand_ParseFail)
+ return true;
+
+ // Check for a register. All real register operands should have used
+ // a context-dependent parse routine, which gives the required register
+ // class. The code is here to mop up other cases, like those where
+ // the instruction isn't recognized.
+ if (Parser.getTok().is(AsmToken::Percent)) {
+ Register Reg;
+ if (parseRegister(Reg))
+ return true;
+ Operands.push_back(SystemZOperand::createInvalid(Reg.StartLoc, Reg.EndLoc));
+ return false;
+ }
+
+ // The only other type of operand is an immediate or address. As above,
+ // real address operands should have used a context-dependent parse routine,
+ // so we treat any plain expression as an immediate.
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ unsigned Base, Index;
+ const MCExpr *Expr, *Length;
+ if (parseAddress(Base, Expr, Index, Length, SystemZMC::GR64Regs, ADDR64Reg))
+ return true;
+
+ SMLoc EndLoc =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ if (Base || Index || Length)
+ Operands.push_back(SystemZOperand::createInvalid(StartLoc, EndLoc));
+ else
+ Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
+ return false;
+}
+
+bool SystemZAsmParser::
+MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCStreamer &Out, unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
+ MCInst Inst;
+ unsigned MatchResult;
+
+ MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
+ MatchingInlineAsm);
+ switch (MatchResult) {
+ default: break;
+ case Match_Success:
+ Inst.setLoc(IDLoc);
+ Out.EmitInstruction(Inst);
+ return false;
+
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special-case the error message for the very common case where only
+ // a single subtarget feature is missing.
+ std::string Msg = "instruction requires:";
+ unsigned Mask = 1;
+ for (unsigned I = 0; I < sizeof(ErrorInfo) * 8 - 1; ++I) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
+
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((SystemZOperand*)Operands[ErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ }
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+
+ case Match_MnemonicFail:
+ return Error(IDLoc, "invalid instruction");
+ }
+
+ llvm_unreachable("Unexpected match type");
+}
+
+SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
+parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ if (Parser.getTok().isNot(AsmToken::Percent))
+ return MatchOperand_NoMatch;
+
+ Register Reg;
+ if (parseRegister(Reg, RegAccess, 0))
+ return MatchOperand_ParseFail;
+
+ Operands.push_back(SystemZOperand::createAccessReg(Reg.Num,
+ Reg.StartLoc,
+ Reg.EndLoc));
+ return MatchOperand_Success;
+}
+
+SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
+parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ int64_t MinVal, int64_t MaxVal) {
+ MCContext &Ctx = getContext();
+ MCStreamer &Out = getStreamer();
+ const MCExpr *Expr;
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ if (getParser().parseExpression(Expr))
+ return MatchOperand_NoMatch;
+
+ // For consistency with the GNU assembler, treat immediates as offsets
+ // from ".".
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ int64_t Value = CE->getValue();
+ if ((Value & 1) || Value < MinVal || Value > MaxVal) {
+ Error(StartLoc, "offset out of range");
+ return MatchOperand_ParseFail;
+ }
+ MCSymbol *Sym = Ctx.CreateTempSymbol();
+ Out.EmitLabel(Sym);
+ const MCExpr *Base = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None,
+ Ctx);
+ Expr = Value == 0 ? Base : MCBinaryExpr::CreateAdd(Base, Expr, Ctx);
+ }
+
+ SMLoc EndLoc =
+ SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Operands.push_back(SystemZOperand::createImm(Expr, StartLoc, EndLoc));
+ return MatchOperand_Success;
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeSystemZAsmParser() {
+ RegisterMCAsmParser<SystemZAsmParser> X(TheSystemZTarget);
+}
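
Editorial note: the address-parsing logic above is the subtle part of this file. After the mandatory displacement, parseAddress() accepts an optional parenthesized group that may hold a base, an index-base pair, or a length-base pair, and the first register is classified as an index only once a comma follows it. Below is a minimal standalone sketch of that decision procedure in plain C++, with no LLVM dependencies; Addr, parseReg, and parseAddrGroup are illustrative names invented here, not part of the patch.

  #include <cassert>
  #include <cctype>
  #include <cstdint>
  #include <string>

  struct Addr {
    unsigned Base = 0;   // 0 means no base register
    unsigned Index = 0;  // 0 means no index register
    int64_t Length = 0;  // 0 means no length operand (valid lengths are >= 1)
  };

  // Parse "%r<n>" at Pos and return the register number.
  static unsigned parseReg(const std::string &S, size_t &Pos) {
    assert(S.compare(Pos, 2, "%r") == 0 && "register expected");
    Pos += 2;
    unsigned Num = 0;
    while (Pos < S.size() && isdigit((unsigned char)S[Pos]))
      Num = Num * 10 + (S[Pos++] - '0');
    return Num;
  }

  // Parse the optional "(...)" group that follows the displacement.
  static Addr parseAddrGroup(const std::string &S) {
    Addr A;
    size_t Pos = 0;
    if (Pos == S.size() || S[Pos] != '(')
      return A;                        // plain D form
    ++Pos;
    if (S[Pos] == '%') {
      unsigned Reg = parseReg(S, Pos);
      if (Pos < S.size() && S[Pos] == ',')
        A.Index = Reg;                 // first register + comma => index
      else
        A.Base = Reg;                  // lone register => base
    } else {
      while (Pos < S.size() && isdigit((unsigned char)S[Pos]))
        A.Length = A.Length * 10 + (S[Pos++] - '0');  // D(L,B) form
    }
    if (Pos < S.size() && S[Pos] == ',') {  // second register is the base
      ++Pos;
      A.Base = parseReg(S, Pos);
    }
    assert(Pos < S.size() && S[Pos] == ')' && "unexpected token in address");
    return A;
  }

  int main() {
    assert(parseAddrGroup("(%r2,%r3)").Index == 2);  // D(X,B)
    assert(parseAddrGroup("(%r2,%r3)").Base == 3);
    assert(parseAddrGroup("(%r2)").Base == 2);       // D(B)
    assert(parseAddrGroup("(16,%r4)").Length == 16); // D(L,B)
    return 0;
  }

This mirrors why the real parseAddress() first reads one register, then promotes it to an index only if a comma follows, and treats a non-register group head as a length.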
diff --git a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
new file mode 100644
index 000000000000..fc3c38d2f343
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
@@ -0,0 +1,323 @@
+//===-- SystemZDisassembler.cpp - Disassembler for SystemZ ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZ.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+namespace {
+class SystemZDisassembler : public MCDisassembler {
+public:
+ SystemZDisassembler(const MCSubtargetInfo &STI)
+ : MCDisassembler(STI) {}
+ virtual ~SystemZDisassembler() {}
+
+ // Override MCDisassembler.
+ virtual DecodeStatus getInstruction(MCInst &instr,
+ uint64_t &size,
+ const MemoryObject &region,
+ uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const LLVM_OVERRIDE;
+};
+} // end anonymous namespace
+
+static MCDisassembler *createSystemZDisassembler(const Target &T,
+ const MCSubtargetInfo &STI) {
+ return new SystemZDisassembler(STI);
+}
+
+extern "C" void LLVMInitializeSystemZDisassembler() {
+ // Register the disassembler.
+ TargetRegistry::RegisterMCDisassembler(TheSystemZTarget,
+ createSystemZDisassembler);
+}
+
+static DecodeStatus decodeRegisterClass(MCInst &Inst, uint64_t RegNo,
+ const unsigned *Regs) {
+ assert(RegNo < 16 && "Invalid register");
+ RegNo = Regs[RegNo];
+ if (RegNo == 0)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::CreateReg(RegNo));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeGR32BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR32Regs);
+}
+
+static DecodeStatus DecodeGRH32BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GRH32Regs);
+}
+
+static DecodeStatus DecodeGR64BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus DecodeGR128BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR128Regs);
+}
+
+static DecodeStatus DecodeADDR64BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus DecodeFP32BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::FP32Regs);
+}
+
+static DecodeStatus DecodeFP64BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::FP64Regs);
+}
+
+static DecodeStatus DecodeFP128BitRegisterClass(MCInst &Inst, uint64_t RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeRegisterClass(Inst, RegNo, SystemZMC::FP128Regs);
+}
+
+template<unsigned N>
+static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ return MCDisassembler::Success;
+}
+
+template<unsigned N>
+static DecodeStatus decodeSImmOperand(MCInst &Inst, uint64_t Imm) {
+ assert(isUInt<N>(Imm) && "Invalid immediate");
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<N>(Imm)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeAccessRegOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeUImmOperand<4>(Inst, Imm);
+}
+
+static DecodeStatus decodeU4ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<4>(Inst, Imm);
+}
+
+static DecodeStatus decodeU6ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<6>(Inst, Imm);
+}
+
+static DecodeStatus decodeU8ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<8>(Inst, Imm);
+}
+
+static DecodeStatus decodeU16ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<16>(Inst, Imm);
+}
+
+static DecodeStatus decodeU32ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeUImmOperand<32>(Inst, Imm);
+}
+
+static DecodeStatus decodeS8ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeSImmOperand<8>(Inst, Imm);
+}
+
+static DecodeStatus decodeS16ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeSImmOperand<16>(Inst, Imm);
+}
+
+static DecodeStatus decodeS32ImmOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address, const void *Decoder) {
+ return decodeSImmOperand<32>(Inst, Imm);
+}
+
+template<unsigned N>
+static DecodeStatus decodePCDBLOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address) {
+ assert(isUInt<N>(Imm) && "Invalid PC-relative offset");
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<N>(Imm) * 2 + Address));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodePC16DBLOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodePCDBLOperand<16>(Inst, Imm, Address);
+}
+
+static DecodeStatus decodePC32DBLOperand(MCInst &Inst, uint64_t Imm,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodePCDBLOperand<32>(Inst, Imm, Address);
+}
+
+static DecodeStatus decodeBDAddr12Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Base = Field >> 12;
+ uint64_t Disp = Field & 0xfff;
+ assert(Base < 16 && "Invalid BDAddr12");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Disp));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDAddr20Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Base = Field >> 20;
+ uint64_t Disp = ((Field << 12) & 0xff000) | ((Field >> 8) & 0xfff);
+ assert(Base < 16 && "Invalid BDAddr20");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<20>(Disp)));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDXAddr12Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Index = Field >> 16;
+ uint64_t Base = (Field >> 12) & 0xf;
+ uint64_t Disp = Field & 0xfff;
+ assert(Index < 16 && "Invalid BDXAddr12");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Disp));
+ Inst.addOperand(MCOperand::CreateReg(Index == 0 ? 0 : Regs[Index]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDXAddr20Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Index = Field >> 24;
+ uint64_t Base = (Field >> 20) & 0xf;
+ uint64_t Disp = ((Field & 0xfff00) >> 8) | ((Field & 0xff) << 12);
+ assert(Index < 16 && "Invalid BDXAddr20");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(SignExtend64<20>(Disp)));
+ Inst.addOperand(MCOperand::CreateReg(Index == 0 ? 0 : Regs[Index]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDLAddr12Len8Operand(MCInst &Inst, uint64_t Field,
+ const unsigned *Regs) {
+ uint64_t Length = Field >> 16;
+ uint64_t Base = (Field >> 12) & 0xf;
+ uint64_t Disp = Field & 0xfff;
+ assert(Length < 256 && "Invalid BDLAddr12Len8");
+ Inst.addOperand(MCOperand::CreateReg(Base == 0 ? 0 : Regs[Base]));
+ Inst.addOperand(MCOperand::CreateImm(Disp));
+ Inst.addOperand(MCOperand::CreateImm(Length + 1));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus decodeBDAddr32Disp12Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr12Operand(Inst, Field, SystemZMC::GR32Regs);
+}
+
+static DecodeStatus decodeBDAddr32Disp20Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr20Operand(Inst, Field, SystemZMC::GR32Regs);
+}
+
+static DecodeStatus decodeBDAddr64Disp12Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr12Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDAddr64Disp20Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDAddr20Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDXAddr64Disp12Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDXAddr12Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDXAddr64Disp20Operand(MCInst &Inst, uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDXAddr20Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+static DecodeStatus decodeBDLAddr64Disp12Len8Operand(MCInst &Inst,
+ uint64_t Field,
+ uint64_t Address,
+ const void *Decoder) {
+ return decodeBDLAddr12Len8Operand(Inst, Field, SystemZMC::GR64Regs);
+}
+
+#include "SystemZGenDisassemblerTables.inc"
+
+DecodeStatus SystemZDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
+ const MemoryObject &Region,
+ uint64_t Address,
+ raw_ostream &os,
+ raw_ostream &cs) const {
+ // Get the first two bytes of the instruction.
+ uint8_t Bytes[6];
+ Size = 0;
+ if (Region.readBytes(Address, 2, Bytes) == -1)
+ return MCDisassembler::Fail;
+
+ // The top 2 bits of the first byte specify the size.
+ const uint8_t *Table;
+ if (Bytes[0] < 0x40) {
+ Size = 2;
+ Table = DecoderTable16;
+ } else if (Bytes[0] < 0xc0) {
+ Size = 4;
+ Table = DecoderTable32;
+ } else {
+ Size = 6;
+ Table = DecoderTable48;
+ }
+
+ // Read any remaining bytes.
+ if (Size > 2 && Region.readBytes(Address + 2, Size - 2, Bytes + 2) == -1)
+ return MCDisassembler::Fail;
+
+ // Construct the instruction.
+ uint64_t Inst = 0;
+ for (uint64_t I = 0; I < Size; ++I)
+ Inst = (Inst << 8) | Bytes[I];
+
+ return decodeInstruction(Table, MI, Inst, Address, this, STI);
+}
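
Editorial note: two encoding details in the disassembler above are worth a worked illustration. First, the instruction length is implied by the top two bits of the first opcode byte — this is what the `< 0x40` / `< 0xc0` comparisons in getInstruction() test. Second, 20-bit displacements are stored with their low 12 bits (DL) before their high 8 bits (DH), which decodeBDAddr20Operand() swaps back before sign-extending. A minimal self-contained sketch, independent of the MC layer; instLength and decodeDisp20 are illustrative helpers invented here.

  #include <cassert>
  #include <cstdint>

  // z/Architecture instruction length from the first byte:
  // top bits 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes.
  static unsigned instLength(uint8_t FirstByte) {
    unsigned TopBits = FirstByte >> 6;
    if (TopBits == 0)
      return 2;
    return TopBits == 3 ? 6 : 4;
  }

  // Reassemble a 20-bit displacement from a field holding DL (bits 19-8)
  // followed by DH (bits 7-0), then sign-extend to 20 bits, mirroring
  // decodeBDAddr20Operand() above.
  static int64_t decodeDisp20(uint32_t Field) {
    uint32_t Disp = ((Field << 12) & 0xff000) | ((Field >> 8) & 0xfff);
    return (int64_t)(Disp ^ 0x80000) - 0x80000;  // SignExtend64<20>
  }

  int main() {
    assert(instLength(0x07) == 2);  // BCR, an RR-format instruction
    assert(instLength(0x58) == 4);  // L, an RX-format instruction
    assert(instLength(0xe3) == 6);  // RXY-format instructions such as LG
    assert(decodeDisp20(0x00100) == 1);   // DL=1, DH=0
    assert(decodeDisp20(0xfffff) == -1);  // DL=0xfff, DH=0xff
    return 0;
  }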
diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
new file mode 100644
index 000000000000..e1e64d3ac177
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
@@ -0,0 +1,165 @@
+//===-- SystemZInstPrinter.cpp - Convert SystemZ MCInst to assembly syntax ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "asm-printer"
+
+#include "SystemZInstPrinter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#include "SystemZGenAsmWriter.inc"
+
+void SystemZInstPrinter::printAddress(unsigned Base, int64_t Disp,
+ unsigned Index, raw_ostream &O) {
+ O << Disp;
+ if (Base) {
+ O << '(';
+ if (Index)
+ O << '%' << getRegisterName(Index) << ',';
+ O << '%' << getRegisterName(Base) << ')';
+ } else
+ assert(!Index && "Shouldn't have an index without a base");
+}
+
+void SystemZInstPrinter::printOperand(const MCOperand &MO, raw_ostream &O) {
+ if (MO.isReg())
+ O << '%' << getRegisterName(MO.getReg());
+ else if (MO.isImm())
+ O << MO.getImm();
+ else if (MO.isExpr())
+ O << *MO.getExpr();
+ else
+ llvm_unreachable("Invalid operand");
+}
+
+void SystemZInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot) {
+ printInstruction(MI, O);
+ printAnnotation(O, Annot);
+}
+
+void SystemZInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const {
+ O << '%' << getRegisterName(RegNo);
+}
+
+void SystemZInstPrinter::printU4ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isUInt<4>(Value) && "Invalid u4imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printU6ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isUInt<6>(Value) && "Invalid u6imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printS8ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isInt<8>(Value) && "Invalid s8imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printU8ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isUInt<8>(Value) && "Invalid u8imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printS16ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isInt<16>(Value) && "Invalid s16imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printU16ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isUInt<16>(Value) && "Invalid u16imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printS32ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isInt<32>(Value) && "Invalid s32imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printU32ImmOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ int64_t Value = MI->getOperand(OpNum).getImm();
+ assert(isUInt<32>(Value) && "Invalid u32imm argument");
+ O << Value;
+}
+
+void SystemZInstPrinter::printAccessRegOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ uint64_t Value = MI->getOperand(OpNum).getImm();
+ assert(Value < 16 && "Invalid access register number");
+ O << "%a" << (unsigned int)Value;
+}
+
+void SystemZInstPrinter::printPCRelOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ const MCOperand &MO = MI->getOperand(OpNum);
+ if (MO.isImm()) {
+ O << "0x";
+ O.write_hex(MO.getImm());
+ } else
+ O << *MO.getExpr();
+}
+
+void SystemZInstPrinter::printOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ printOperand(MI->getOperand(OpNum), O);
+}
+
+void SystemZInstPrinter::printBDAddrOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ printAddress(MI->getOperand(OpNum).getReg(),
+ MI->getOperand(OpNum + 1).getImm(), 0, O);
+}
+
+void SystemZInstPrinter::printBDXAddrOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ printAddress(MI->getOperand(OpNum).getReg(),
+ MI->getOperand(OpNum + 1).getImm(),
+ MI->getOperand(OpNum + 2).getReg(), O);
+}
+
+void SystemZInstPrinter::printBDLAddrOperand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ unsigned Base = MI->getOperand(OpNum).getReg();
+ uint64_t Disp = MI->getOperand(OpNum + 1).getImm();
+ uint64_t Length = MI->getOperand(OpNum + 2).getImm();
+ O << Disp << '(' << Length;
+ if (Base)
+ O << ",%" << getRegisterName(Base);
+ O << ')';
+}
+
+void SystemZInstPrinter::printCond4Operand(const MCInst *MI, int OpNum,
+ raw_ostream &O) {
+ static const char *const CondNames[] = {
+ "o", "h", "nle", "l", "nhe", "lh", "ne",
+ "e", "nlh", "he", "nl", "le", "nh", "no"
+ };
+ uint64_t Imm = MI->getOperand(OpNum).getImm();
+ assert(Imm > 0 && Imm < 15 && "Invalid condition");
+ O << CondNames[Imm - 1];
+}
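
Editorial note: printCond4Operand() above prints the 14 non-trivial values of the 4-bit condition-code mask as compare-style mnemonics. The semantics behind the table: mask bit value 8 tests CC0 ("e" after a compare), 4 tests CC1 ("l"), 2 tests CC2 ("h"), and 1 tests CC3 ("o"), so composite mnemonics are the OR of their parts — "le" is 8|4 = 12, and each "n"-prefixed name is the complement of its base. A small self-contained check; condName is an illustrative wrapper invented here.

  #include <cassert>
  #include <cstring>

  static const char *const CondNames[] = {
    "o", "h", "nle", "l", "nhe", "lh", "ne",
    "e", "nlh", "he", "nl", "le", "nh", "no"
  };

  // Map a 4-bit condition mask (1..14) to its mnemonic, as in
  // printCond4Operand() above.
  static const char *condName(unsigned Mask) {
    assert(Mask > 0 && Mask < 15 && "Invalid condition");
    return CondNames[Mask - 1];
  }

  int main() {
    assert(strcmp(condName(8), "e") == 0);        // CC0 alone
    assert(strcmp(condName(8 | 4), "le") == 0);   // "low or equal"
    assert(strcmp(condName(15 - 8), "ne") == 0);  // complement of "e"
    return 0;
  }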
diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
new file mode 100644
index 000000000000..734ecf0ff230
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
@@ -0,0 +1,69 @@
+//==- SystemZInstPrinter.h - Convert SystemZ MCInst to assembly --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a SystemZ MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEMZINSTPRINTER_H
+#define LLVM_SYSTEMZINSTPRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCOperand;
+
+class SystemZInstPrinter : public MCInstPrinter {
+public:
+ SystemZInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+ // Automatically generated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+
+ // Print an address with the given base, displacement and index.
+ static void printAddress(unsigned Base, int64_t Disp, unsigned Index,
+ raw_ostream &O);
+
+ // Print the given operand.
+ static void printOperand(const MCOperand &MO, raw_ostream &O);
+
+ // Override MCInstPrinter.
+ virtual void printRegName(raw_ostream &O, unsigned RegNo) const
+ LLVM_OVERRIDE;
+ virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot)
+ LLVM_OVERRIDE;
+
+private:
+ // Print various types of operand.
+ void printOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printBDAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printBDXAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printBDLAddrOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printU4ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printU6ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printS8ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printU8ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printS16ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printU16ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printS32ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printU32ImmOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printPCRelOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+ void printAccessRegOperand(const MCInst *MI, int OpNum, raw_ostream &O);
+
+ // Print the mnemonic for a condition-code mask ("ne", "lh", etc.)
+ // This forms part of the instruction name rather than the operand list.
+ void printCond4Operand(const MCInst *MI, int OpNum, raw_ostream &O);
+};
+} // end namespace llvm
+
+#endif

diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
new file mode 100644
index 000000000000..26a8faeea10b
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -0,0 +1,126 @@
+//===-- SystemZMCAsmBackend.cpp - SystemZ assembler backend ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
+#include "MCTargetDesc/SystemZMCFixups.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectWriter.h"
+
+using namespace llvm;
+
+// Value is a fully-resolved relocation value: Symbol + Addend [- Pivot].
+// Return the bits that should be installed in a relocation field for
+// fixup kind Kind.
+static uint64_t extractBitsForFixup(MCFixupKind Kind, uint64_t Value) {
+ if (Kind < FirstTargetFixupKind)
+ return Value;
+
+ switch (unsigned(Kind)) {
+ case SystemZ::FK_390_PC16DBL:
+ case SystemZ::FK_390_PC32DBL:
+ case SystemZ::FK_390_PLT16DBL:
+ case SystemZ::FK_390_PLT32DBL:
+ return (int64_t)Value / 2;
+ }
+
+ llvm_unreachable("Unknown fixup kind!");
+}
+
+namespace {
+class SystemZMCAsmBackend : public MCAsmBackend {
+ uint8_t OSABI;
+public:
+ SystemZMCAsmBackend(uint8_t osABI)
+ : OSABI(osABI) {}
+
+ // Override MCAsmBackend
+ virtual unsigned getNumFixupKinds() const LLVM_OVERRIDE {
+ return SystemZ::NumTargetFixupKinds;
+ }
+ virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const
+ LLVM_OVERRIDE;
+ virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value) const LLVM_OVERRIDE;
+ virtual bool mayNeedRelaxation(const MCInst &Inst) const LLVM_OVERRIDE {
+ return false;
+ }
+ virtual bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCRelaxableFragment *Fragment,
+ const MCAsmLayout &Layout) const
+ LLVM_OVERRIDE {
+ return false;
+ }
+ virtual void relaxInstruction(const MCInst &Inst,
+ MCInst &Res) const LLVM_OVERRIDE {
+ llvm_unreachable("SystemZ does not have assembler relaxation");
+ }
+ virtual bool writeNopData(uint64_t Count,
+ MCObjectWriter *OW) const LLVM_OVERRIDE;
+ virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const
+ LLVM_OVERRIDE {
+ return createSystemZObjectWriter(OS, OSABI);
+ }
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const
+ LLVM_OVERRIDE {
+ return false;
+ }
+};
+} // end anonymous namespace
+
+const MCFixupKindInfo &
+SystemZMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[SystemZ::NumTargetFixupKinds] = {
+ { "FK_390_PC16DBL", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_390_PC32DBL", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_390_PLT16DBL", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "FK_390_PLT32DBL", 0, 32, MCFixupKindInfo::FKF_IsPCRel }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+}
+
+void SystemZMCAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
+ unsigned DataSize, uint64_t Value) const {
+ MCFixupKind Kind = Fixup.getKind();
+ unsigned Offset = Fixup.getOffset();
+ unsigned Size = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
+
+ assert(Offset + Size <= DataSize && "Invalid fixup offset!");
+
+ // Big-endian insertion of Size bytes.
+ Value = extractBitsForFixup(Kind, Value);
+ unsigned ShiftValue = (Size * 8) - 8;
+ for (unsigned I = 0; I != Size; ++I) {
+ Data[Offset + I] |= uint8_t(Value >> ShiftValue);
+ ShiftValue -= 8;
+ }
+}
+
+bool SystemZMCAsmBackend::writeNopData(uint64_t Count,
+ MCObjectWriter *OW) const {
+ for (uint64_t I = 0; I != Count; ++I)
+ OW->Write8(7);
+ return true;
+}
+
+MCAsmBackend *llvm::createSystemZMCAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU) {
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(Triple(TT).getOS());
+ return new SystemZMCAsmBackend(OSABI);
+}
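
Editorial note: two mechanisms in the backend above are easy to check in isolation. The *DBL fixups are halfword-scaled — every SystemZ instruction is halfword-aligned, so bit 0 of a PC-relative byte distance is always zero and extractBitsForFixup() halves it — and applyFixup() then ORs the value into the instruction bytes big-endian, most significant byte first. A standalone sketch under those assumptions; bitsForPCDBLFixup and insertBE are illustrative names invented here.

  #include <cassert>
  #include <cstdint>

  // A *DBL fixup stores a PC-relative offset in halfwords, so the
  // resolved byte distance is halved before insertion.
  static uint64_t bitsForPCDBLFixup(int64_t ByteOffset) {
    return (uint64_t)(ByteOffset / 2);
  }

  // Big-endian insertion of the low Size bytes of Value at Data[Offset],
  // mirroring SystemZMCAsmBackend::applyFixup().  ORing rather than
  // assigning preserves whatever the encoder already placed in the field.
  static void insertBE(uint8_t *Data, unsigned Offset, unsigned Size,
                       uint64_t Value) {
    unsigned Shift = Size * 8 - 8;
    for (unsigned I = 0; I != Size; ++I) {
      Data[Offset + I] |= (uint8_t)(Value >> Shift);
      Shift -= 8;
    }
  }

  int main() {
    // A branch 0x1000 bytes forward: FK_390_PC16DBL stores 0x0800.
    uint8_t Field[2] = {0, 0};
    insertBE(Field, 0, 2, bitsForPCDBLFixup(0x1000));
    assert(Field[0] == 0x08 && Field[1] == 0x00);
    return 0;
  }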
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
new file mode 100644
index 000000000000..965c41e2d151
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -0,0 +1,36 @@
+//===-- SystemZMCAsmInfo.cpp - SystemZ asm properties ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZMCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+
+using namespace llvm;
+
+SystemZMCAsmInfo::SystemZMCAsmInfo(StringRef TT) {
+ PointerSize = 8;
+ CalleeSaveStackSlotSize = 8;
+ IsLittleEndian = false;
+
+ CommentString = "#";
+ GlobalPrefix = "";
+ PrivateGlobalPrefix = ".L";
+ ZeroDirective = "\t.space\t";
+ Data64bitsDirective = "\t.quad\t";
+ UsesELFSectionDirectiveForBSS = true;
+ SupportsDebugInformation = true;
+ HasLEB128 = true;
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+}
+
+const MCSection *
+SystemZMCAsmInfo::getNonexecutableStackSection(MCContext &Ctx) const {
+ return Ctx.getELFSection(".note.GNU-stack", ELF::SHT_PROGBITS,
+ 0, SectionKind::getMetadata());
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
new file mode 100644
index 000000000000..b9ac92a6934f
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
@@ -0,0 +1,30 @@
+//====-- SystemZMCAsmInfo.h - SystemZ asm properties -----------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SystemZTARGETASMINFO_H
+#define SystemZTARGETASMINFO_H
+
+#include "llvm/MC/MCAsmInfoELF.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class StringRef;
+
+class SystemZMCAsmInfo : public MCAsmInfoELF {
+public:
+ explicit SystemZMCAsmInfo(StringRef TT);
+
+ // Override MCAsmInfo.
+ virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const
+ LLVM_OVERRIDE;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
new file mode 100644
index 000000000000..f07ea7b31e6a
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
@@ -0,0 +1,187 @@
+//===-- SystemZMCCodeEmitter.cpp - Convert SystemZ code to machine code ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SystemZMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
+#include "MCTargetDesc/SystemZMCFixups.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstrInfo.h"
+
+using namespace llvm;
+
+namespace {
+class SystemZMCCodeEmitter : public MCCodeEmitter {
+ const MCInstrInfo &MCII;
+ MCContext &Ctx;
+
+public:
+ SystemZMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
+ : MCII(mcii), Ctx(ctx) {
+ }
+
+ ~SystemZMCCodeEmitter() {}
+
+  // Override MCCodeEmitter.
+ virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const
+ LLVM_OVERRIDE;
+
+private:
+ // Automatically generated by TableGen.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // Called by the TableGen code to get the binary encoding of operand
+ // MO in MI. Fixups is the list of fixups against MI.
+ uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // Called by the TableGen code to get the binary encoding of an address.
+ // The index or length, if any, is encoded first, followed by the base,
+ // followed by the displacement. In a 20-bit displacement,
+ // the low 12 bits are encoded before the high 8 bits.
+ uint64_t getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ uint64_t getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ // Operand OpNum of MI needs a PC-relative fixup of kind Kind at
+ // Offset bytes from the start of MI. Add the fixup to Fixups
+ // and return the in-place addend, which since we're a RELA target
+ // is always 0.
+ uint64_t getPCRelEncoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups,
+ unsigned Kind, int64_t Offset) const;
+
+ uint64_t getPC16DBLEncoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC16DBL, 2);
+ }
+ uint64_t getPC32DBLEncoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC32DBL, 2);
+ }
+};
+} // end anonymous namespace
+
+MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &MCSTI,
+ MCContext &Ctx) {
+ return new SystemZMCCodeEmitter(MCII, Ctx);
+}
+
+void SystemZMCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Bits = getBinaryCodeForInstr(MI, Fixups);
+ unsigned Size = MCII.get(MI.getOpcode()).getSize();
+ // Big-endian insertion of Size bytes.
+ unsigned ShiftValue = (Size * 8) - 8;
+ for (unsigned I = 0; I != Size; ++I) {
+ OS << uint8_t(Bits >> ShiftValue);
+ ShiftValue -= 8;
+ }
+}
+
+uint64_t SystemZMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg())
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
+ if (MO.isImm())
+ return static_cast<uint64_t>(MO.getImm());
+ llvm_unreachable("Unexpected operand type!");
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ assert(isUInt<4>(Base) && isUInt<12>(Disp));
+ return (Base << 12) | Disp;
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ assert(isUInt<4>(Base) && isInt<20>(Disp));
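+  // For example, Base 0xB with Disp 0x12345 encodes as 0xB34512:
+  // the low 12 bits (0x345) come before the high 8 bits (0x12).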
+ return (Base << 20) | ((Disp & 0xfff) << 8) | ((Disp & 0xff000) >> 12);
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<4>(Index));
+ return (Index << 16) | (Base << 12) | Disp;
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ assert(isUInt<4>(Base) && isInt<20>(Disp) && isUInt<4>(Index));
+ return (Index << 24) | (Base << 20) | ((Disp & 0xfff) << 8)
+ | ((Disp & 0xff000) >> 12);
+}
+
+uint64_t SystemZMCCodeEmitter::
+getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
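+  // SS-format length fields hold the operand length minus one.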
+ uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups) - 1;
+ assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<8>(Len));
+ return (Len << 16) | (Base << 12) | Disp;
+}
+
+uint64_t
+SystemZMCCodeEmitter::getPCRelEncoding(const MCInst &MI, unsigned OpNum,
+ SmallVectorImpl<MCFixup> &Fixups,
+ unsigned Kind, int64_t Offset) const {
+ const MCOperand &MO = MI.getOperand(OpNum);
+ const MCExpr *Expr;
+ if (MO.isImm())
+ Expr = MCConstantExpr::Create(MO.getImm() + Offset, Ctx);
+ else {
+ Expr = MO.getExpr();
+ if (Offset) {
+ // The operand value is relative to the start of MI, but the fixup
+ // is relative to the operand field itself, which is Offset bytes
+ // into MI. Add Offset to the relocation value to cancel out
+ // this difference.
+ const MCExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
+ Expr = MCBinaryExpr::CreateAdd(Expr, OffsetExpr, Ctx);
+ }
+ }
+ Fixups.push_back(MCFixup::Create(Offset, Expr, (MCFixupKind)Kind));
+ return 0;
+}
+
+#include "SystemZGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
new file mode 100644
index 000000000000..9c94ebbaac78
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
@@ -0,0 +1,31 @@
+//===-- SystemZMCFixups.h - SystemZ-specific fixup entries ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEMZMCFIXUPS_H
+#define LLVM_SYSTEMZMCFIXUPS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace SystemZ {
+ enum FixupKind {
+ // These correspond directly to R_390_* relocations.
+ FK_390_PC16DBL = FirstTargetFixupKind,
+ FK_390_PC32DBL,
+ FK_390_PLT16DBL,
+ FK_390_PLT32DBL,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+ };
+}
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
new file mode 100644
index 000000000000..36e3d83d4d59
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
@@ -0,0 +1,140 @@
+//===-- SystemZMCObjectWriter.cpp - SystemZ ELF writer --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
+#include "MCTargetDesc/SystemZMCFixups.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCValue.h"
+
+using namespace llvm;
+
+namespace {
+class SystemZObjectWriter : public MCELFObjectTargetWriter {
+public:
+ SystemZObjectWriter(uint8_t OSABI);
+
+ virtual ~SystemZObjectWriter();
+
+protected:
+ // Override MCELFObjectTargetWriter.
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel, bool IsRelocWithSymbol,
+ int64_t Addend) const LLVM_OVERRIDE;
+ virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const LLVM_OVERRIDE;
+};
+} // end anonymous namespace
+
+SystemZObjectWriter::SystemZObjectWriter(uint8_t OSABI)
+ : MCELFObjectTargetWriter(/*Is64Bit=*/true, OSABI, ELF::EM_S390,
+ /*HasRelocationAddend=*/ true) {}
+
+SystemZObjectWriter::~SystemZObjectWriter() {
+}
+
+// Return the relocation type for an absolute value of MCFixupKind Kind.
+static unsigned getAbsoluteReloc(unsigned Kind) {
+ switch (Kind) {
+ case FK_Data_1: return ELF::R_390_8;
+ case FK_Data_2: return ELF::R_390_16;
+ case FK_Data_4: return ELF::R_390_32;
+ case FK_Data_8: return ELF::R_390_64;
+ }
+ llvm_unreachable("Unsupported absolute address");
+}
+
+// Return the relocation type for a PC-relative value of MCFixupKind Kind.
+static unsigned getPCRelReloc(unsigned Kind) {
+ switch (Kind) {
+ case FK_Data_2: return ELF::R_390_PC16;
+ case FK_Data_4: return ELF::R_390_PC32;
+ case FK_Data_8: return ELF::R_390_PC64;
+ case SystemZ::FK_390_PC16DBL: return ELF::R_390_PC16DBL;
+ case SystemZ::FK_390_PC32DBL: return ELF::R_390_PC32DBL;
+ case SystemZ::FK_390_PLT16DBL: return ELF::R_390_PLT16DBL;
+ case SystemZ::FK_390_PLT32DBL: return ELF::R_390_PLT32DBL;
+ }
+ llvm_unreachable("Unsupported PC-relative address");
+}
+
+// Return the R_390_TLS_LE* relocation type for MCFixupKind Kind.
+static unsigned getTLSLEReloc(unsigned Kind) {
+ switch (Kind) {
+ case FK_Data_4: return ELF::R_390_TLS_LE32;
+ case FK_Data_8: return ELF::R_390_TLS_LE64;
+ }
+ llvm_unreachable("Unsupported absolute address");
+}
+
+// Return the PLT relocation counterpart of MCFixupKind Kind.
+static unsigned getPLTReloc(unsigned Kind) {
+ switch (Kind) {
+ case SystemZ::FK_390_PC16DBL: return ELF::R_390_PLT16DBL;
+ case SystemZ::FK_390_PC32DBL: return ELF::R_390_PLT32DBL;
+ }
+ llvm_unreachable("Unsupported absolute address");
+}
+
+unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel,
+ bool IsRelocWithSymbol,
+ int64_t Addend) const {
+ MCSymbolRefExpr::VariantKind Modifier = (Target.isAbsolute() ?
+ MCSymbolRefExpr::VK_None :
+ Target.getSymA()->getKind());
+ unsigned Kind = Fixup.getKind();
+ switch (Modifier) {
+ case MCSymbolRefExpr::VK_None:
+ if (IsPCRel)
+ return getPCRelReloc(Kind);
+ return getAbsoluteReloc(Kind);
+
+ case MCSymbolRefExpr::VK_NTPOFF:
+ assert(!IsPCRel && "NTPOFF shouldn't be PC-relative");
+ return getTLSLEReloc(Kind);
+
+ case MCSymbolRefExpr::VK_GOT:
+ if (IsPCRel && Kind == SystemZ::FK_390_PC32DBL)
+ return ELF::R_390_GOTENT;
+ llvm_unreachable("Only PC-relative GOT accesses are supported for now");
+
+ case MCSymbolRefExpr::VK_PLT:
+ assert(IsPCRel && "@PLT shouldt be PC-relative");
+ return getPLTReloc(Kind);
+
+ default:
+ llvm_unreachable("Modifier not supported");
+ }
+}
+
+const MCSymbol *SystemZObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
+ const MCValue &Target,
+ const MCFragment &F,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ // The addend in a PC-relative R_390_* relocation is always applied to
+ // the PC-relative part of the address. If some kind of indirection
+ // is applied to the symbol first, we can't use an addend there too.
+ if (!Target.isAbsolute() &&
+ Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None &&
+ IsPCRel)
+ return &Target.getSymA()->getSymbol().AliasedSymbol();
+ return NULL;
+}
+
+MCObjectWriter *llvm::createSystemZObjectWriter(raw_ostream &OS,
+ uint8_t OSABI) {
+ MCELFObjectTargetWriter *MOTW = new SystemZObjectWriter(OSABI);
+ return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/false);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
new file mode 100644
index 000000000000..9e1296b912a0
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -0,0 +1,229 @@
+//===-- SystemZMCTargetDesc.cpp - SystemZ target descriptions -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZMCTargetDesc.h"
+#include "InstPrinter/SystemZInstPrinter.h"
+#include "SystemZMCAsmInfo.h"
+#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#define GET_INSTRINFO_MC_DESC
+#include "SystemZGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "SystemZGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "SystemZGenRegisterInfo.inc"
+
+using namespace llvm;
+
+const unsigned SystemZMC::GR32Regs[16] = {
+ SystemZ::R0L, SystemZ::R1L, SystemZ::R2L, SystemZ::R3L,
+ SystemZ::R4L, SystemZ::R5L, SystemZ::R6L, SystemZ::R7L,
+ SystemZ::R8L, SystemZ::R9L, SystemZ::R10L, SystemZ::R11L,
+ SystemZ::R12L, SystemZ::R13L, SystemZ::R14L, SystemZ::R15L
+};
+
+const unsigned SystemZMC::GRH32Regs[16] = {
+ SystemZ::R0H, SystemZ::R1H, SystemZ::R2H, SystemZ::R3H,
+ SystemZ::R4H, SystemZ::R5H, SystemZ::R6H, SystemZ::R7H,
+ SystemZ::R8H, SystemZ::R9H, SystemZ::R10H, SystemZ::R11H,
+ SystemZ::R12H, SystemZ::R13H, SystemZ::R14H, SystemZ::R15H
+};
+
+const unsigned SystemZMC::GR64Regs[16] = {
+ SystemZ::R0D, SystemZ::R1D, SystemZ::R2D, SystemZ::R3D,
+ SystemZ::R4D, SystemZ::R5D, SystemZ::R6D, SystemZ::R7D,
+ SystemZ::R8D, SystemZ::R9D, SystemZ::R10D, SystemZ::R11D,
+ SystemZ::R12D, SystemZ::R13D, SystemZ::R14D, SystemZ::R15D
+};
+
+const unsigned SystemZMC::GR128Regs[16] = {
+ SystemZ::R0Q, 0, SystemZ::R2Q, 0,
+ SystemZ::R4Q, 0, SystemZ::R6Q, 0,
+ SystemZ::R8Q, 0, SystemZ::R10Q, 0,
+ SystemZ::R12Q, 0, SystemZ::R14Q, 0
+};
+
+const unsigned SystemZMC::FP32Regs[16] = {
+ SystemZ::F0S, SystemZ::F1S, SystemZ::F2S, SystemZ::F3S,
+ SystemZ::F4S, SystemZ::F5S, SystemZ::F6S, SystemZ::F7S,
+ SystemZ::F8S, SystemZ::F9S, SystemZ::F10S, SystemZ::F11S,
+ SystemZ::F12S, SystemZ::F13S, SystemZ::F14S, SystemZ::F15S
+};
+
+const unsigned SystemZMC::FP64Regs[16] = {
+ SystemZ::F0D, SystemZ::F1D, SystemZ::F2D, SystemZ::F3D,
+ SystemZ::F4D, SystemZ::F5D, SystemZ::F6D, SystemZ::F7D,
+ SystemZ::F8D, SystemZ::F9D, SystemZ::F10D, SystemZ::F11D,
+ SystemZ::F12D, SystemZ::F13D, SystemZ::F14D, SystemZ::F15D
+};
+
+const unsigned SystemZMC::FP128Regs[16] = {
+ SystemZ::F0Q, SystemZ::F1Q, 0, 0,
+ SystemZ::F4Q, SystemZ::F5Q, 0, 0,
+ SystemZ::F8Q, SystemZ::F9Q, 0, 0,
+ SystemZ::F12Q, SystemZ::F13Q, 0, 0
+};
+
+unsigned SystemZMC::getFirstReg(unsigned Reg) {
+ static unsigned Map[SystemZ::NUM_TARGET_REGS];
+ static bool Initialized = false;
+  if (!Initialized) {
+    for (unsigned I = 0; I < 16; ++I) {
+      Map[GR32Regs[I]] = I;
+      Map[GRH32Regs[I]] = I;
+      Map[GR64Regs[I]] = I;
+      Map[GR128Regs[I]] = I;
+      Map[FP32Regs[I]] = I;
+      Map[FP64Regs[I]] = I;
+      Map[FP128Regs[I]] = I;
+    }
+    Initialized = true;
+  }
+ assert(Reg < SystemZ::NUM_TARGET_REGS);
+ return Map[Reg];
+}
+
+static MCAsmInfo *createSystemZMCAsmInfo(const MCRegisterInfo &MRI,
+ StringRef TT) {
+ MCAsmInfo *MAI = new SystemZMCAsmInfo(TT);
+ MCCFIInstruction Inst =
+ MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(SystemZ::R15D, true),
+ SystemZMC::CFAOffsetFromInitialSP);
+ MAI->addInitialFrameState(Inst);
+ return MAI;
+}
+
+static MCInstrInfo *createSystemZMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitSystemZMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createSystemZMCRegisterInfo(StringRef TT) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitSystemZMCRegisterInfo(X, SystemZ::R14D);
+ return X;
+}
+
+static MCSubtargetInfo *createSystemZMCSubtargetInfo(StringRef TT,
+ StringRef CPU,
+ StringRef FS) {
+ MCSubtargetInfo *X = new MCSubtargetInfo();
+ InitSystemZMCSubtargetInfo(X, TT, CPU, FS);
+ return X;
+}
+
+static MCCodeGenInfo *createSystemZMCCodeGenInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) {
+ MCCodeGenInfo *X = new MCCodeGenInfo();
+
+ // Static code is suitable for use in a dynamic executable; there is no
+ // separate DynamicNoPIC model.
+ if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC)
+ RM = Reloc::Static;
+
+ // For SystemZ we define the models as follows:
+ //
+ // Small: BRASL can call any function and will use a stub if necessary.
+ // Locally-binding symbols will always be in range of LARL.
+ //
+ // Medium: BRASL can call any function and will use a stub if necessary.
+ // GOT slots and locally-defined text will always be in range
+ // of LARL, but other symbols might not be.
+ //
+ // Large: Equivalent to Medium for now.
+ //
+ // Kernel: Equivalent to Medium for now.
+ //
+ // This means that any PIC module smaller than 4GB meets the
+ // requirements of Small, so Small seems like the best default there.
+ //
+ // All symbols bind locally in a non-PIC module, so the choice is less
+ // obvious. There are two cases:
+ //
+ // - When creating an executable, PLTs and copy relocations allow
+ // us to treat external symbols as part of the executable.
+ // Any executable smaller than 4GB meets the requirements of Small,
+ // so that seems like the best default.
+ //
+ // - When creating JIT code, stubs will be in range of BRASL if the
+ // image is less than 4GB in size. GOT entries will likewise be
+ // in range of LARL. However, the JIT environment has no equivalent
+ // of copy relocs, so locally-binding data symbols might not be in
+ // the range of LARL. We need the Medium model in that case.
+ if (CM == CodeModel::Default)
+ CM = CodeModel::Small;
+ else if (CM == CodeModel::JITDefault)
+ CM = RM == Reloc::PIC_ ? CodeModel::Small : CodeModel::Medium;
+ X->InitMCCodeGenInfo(RM, CM, OL);
+ return X;
+}
+
+static MCInstPrinter *createSystemZMCInstPrinter(const Target &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI) {
+ return new SystemZInstPrinter(MAI, MII, MRI);
+}
+
+static MCStreamer *createSystemZMCObjectStreamer(const Target &T, StringRef TT,
+ MCContext &Ctx,
+ MCAsmBackend &MAB,
+ raw_ostream &OS,
+ MCCodeEmitter *Emitter,
+ bool RelaxAll,
+ bool NoExecStack) {
+ return createELFStreamer(Ctx, 0, MAB, OS, Emitter, RelaxAll, NoExecStack);
+}
+
+extern "C" void LLVMInitializeSystemZTargetMC() {
+ // Register the MCAsmInfo.
+ TargetRegistry::RegisterMCAsmInfo(TheSystemZTarget,
+ createSystemZMCAsmInfo);
+
+ // Register the MCCodeGenInfo.
+ TargetRegistry::RegisterMCCodeGenInfo(TheSystemZTarget,
+ createSystemZMCCodeGenInfo);
+
+ // Register the MCCodeEmitter.
+ TargetRegistry::RegisterMCCodeEmitter(TheSystemZTarget,
+ createSystemZMCCodeEmitter);
+
+ // Register the MCInstrInfo.
+ TargetRegistry::RegisterMCInstrInfo(TheSystemZTarget,
+ createSystemZMCInstrInfo);
+
+ // Register the MCRegisterInfo.
+ TargetRegistry::RegisterMCRegInfo(TheSystemZTarget,
+ createSystemZMCRegisterInfo);
+
+ // Register the MCSubtargetInfo.
+ TargetRegistry::RegisterMCSubtargetInfo(TheSystemZTarget,
+ createSystemZMCSubtargetInfo);
+
+ // Register the MCAsmBackend.
+ TargetRegistry::RegisterMCAsmBackend(TheSystemZTarget,
+ createSystemZMCAsmBackend);
+
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(TheSystemZTarget,
+ createSystemZMCInstPrinter);
+
+  // Register the MCObjectStreamer.
+ TargetRegistry::RegisterMCObjectStreamer(TheSystemZTarget,
+ createSystemZMCObjectStreamer);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
new file mode 100644
index 000000000000..97e325b984f6
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -0,0 +1,96 @@
+//===-- SystemZMCTargetDesc.h - SystemZ target descriptions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZMCTARGETDESC_H
+#define SYSTEMZMCTARGETDESC_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class StringRef;
+class Target;
+class raw_ostream;
+
+extern Target TheSystemZTarget;
+
+namespace SystemZMC {
+ // How many bytes are in the ABI-defined, caller-allocated part of
+ // a stack frame.
+ const int64_t CallFrameSize = 160;
+
+ // The offset of the DWARF CFA from the incoming stack pointer.
+ const int64_t CFAOffsetFromInitialSP = CallFrameSize;
+
+ // Maps of asm register numbers to LLVM register numbers, with 0 indicating
+ // an invalid register. In principle we could use 32-bit and 64-bit register
+ // classes directly, provided that we relegated the GPR allocation order
+ // in SystemZRegisterInfo.td to an AltOrder and left the default order
+ // as %r0-%r15. It seems better to provide the same interface for
+ // all classes though.
+ extern const unsigned GR32Regs[16];
+ extern const unsigned GRH32Regs[16];
+ extern const unsigned GR64Regs[16];
+ extern const unsigned GR128Regs[16];
+ extern const unsigned FP32Regs[16];
+ extern const unsigned FP64Regs[16];
+ extern const unsigned FP128Regs[16];
+
+ // Return the 0-based number of the first architectural register that
+ // contains the given LLVM register. E.g. R1D -> 1.
+ unsigned getFirstReg(unsigned Reg);
+
+ // Return the given register as a GR64.
+ inline unsigned getRegAsGR64(unsigned Reg) {
+ return GR64Regs[getFirstReg(Reg)];
+ }
+
+ // Return the given register as a low GR32.
+ inline unsigned getRegAsGR32(unsigned Reg) {
+ return GR32Regs[getFirstReg(Reg)];
+ }
+
+ // Return the given register as a high GR32.
+ inline unsigned getRegAsGRH32(unsigned Reg) {
+ return GRH32Regs[getFirstReg(Reg)];
+ }
+}
+
+MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+
+MCAsmBackend *createSystemZMCAsmBackend(const Target &T,
+ const MCRegisterInfo &MRI,
+ StringRef TT, StringRef CPU);
+
+MCObjectWriter *createSystemZObjectWriter(raw_ostream &OS, uint8_t OSABI);
+} // end namespace llvm
+
+// Defines symbolic names for SystemZ registers.
+// This defines a mapping from register name to register number.
+#define GET_REGINFO_ENUM
+#include "SystemZGenRegisterInfo.inc"
+
+// Defines symbolic names for the SystemZ instructions.
+#define GET_INSTRINFO_ENUM
+#include "SystemZGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "SystemZGenSubtargetInfo.inc"
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/README.txt b/contrib/llvm/lib/Target/SystemZ/README.txt
new file mode 100644
index 000000000000..afa6cf090d07
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/README.txt
@@ -0,0 +1,181 @@
+//===---------------------------------------------------------------------===//
+// Random notes about and ideas for the SystemZ backend.
+//===---------------------------------------------------------------------===//
+
+The initial backend is deliberately restricted to z10. We should add support
+for later architectures at some point.
+
+--
+
+SystemZDAGToDAGISel::SelectInlineAsmMemoryOperand() is passed "m" for all
+inline asm memory constraints; it doesn't get to see the original constraint.
+This means that it must conservatively treat all inline asm constraints
+as the most restricted type, "R".
+
+--
+
+If an inline asm ties an i32 "r" result to an i64 input, the input
+will be treated as an i32, leaving the upper bits uninitialised.
+For example:
+
+define void @f4(i32 *%dst) {
+ %val = call i32 asm "blah $0", "=r,0" (i64 103)
+ store i32 %val, i32 *%dst
+ ret void
+}
+
+from CodeGen/SystemZ/asm-09.ll will use LHI rather than LGHI
+to load 103. This seems to be a general target-independent problem.
+
+--
+
+The tuning of the choice between LOAD ADDRESS (LA) and addition in
+SystemZISelDAGToDAG.cpp is suspect. It should be tweaked based on
+performance measurements.
+
+--
+
+There is no scheduling support.
+
+--
+
+We don't use the BRANCH ON INDEX instructions.
+
+--
+
+We might want to use BRANCH ON CONDITION for conditional indirect calls
+and conditional returns.
+
+--
+
+We don't use the TEST DATA CLASS instructions.
+
+--
+
+We could use the generic floating-point forms of LOAD COMPLEMENT,
+LOAD NEGATIVE and LOAD POSITIVE in cases where we don't need the
+condition codes. For example, we could use LCDFR instead of LCDBR.
+
+--
+
+We only use MVC, XC and CLC for constant-length block operations.
+We could extend them to variable-length operations too,
+using EXECUTE RELATIVE LONG.
+
+MVCIN, MVCLE and CLCLE may be worthwhile too.
+
+--
+
+We don't use CUSE or the TRANSLATE family of instructions for string
+operations. The TRANSLATE ones are probably more difficult to exploit.
+
+--
+
+We don't take full advantage of builtins like fabsl because the calling
+conventions require f128s to be returned by invisible reference.
+
+--
+
+ADD LOGICAL WITH SIGNED IMMEDIATE could be useful when we need to
+produce a carry. SUBTRACT LOGICAL IMMEDIATE could be useful when we
+need to produce a borrow. (Note that there are no memory forms of
+ADD LOGICAL WITH CARRY and SUBTRACT LOGICAL WITH BORROW, so the high
+part of 128-bit memory operations would probably need to be done
+via a register.)
+
+--
+
+We don't use the halfword forms of LOAD REVERSED and STORE REVERSED
+(LRVH and STRVH).
+
+--
+
+We don't use ICM or STCM.
+
+--
+
+DAGCombiner doesn't yet fold truncations of extended loads. Functions like:
+
+ unsigned long f (unsigned long x, unsigned short *y)
+ {
+ return (x << 32) | *y;
+ }
+
+therefore end up as:
+
+ sllg %r2, %r2, 32
+ llgh %r0, 0(%r3)
+ lr %r2, %r0
+ br %r14
+
+but truncating the load would give:
+
+ sllg %r2, %r2, 32
+ lh %r2, 0(%r3)
+ br %r14
+
+--
+
+Functions like:
+
+define i64 @f1(i64 %a) {
+ %and = and i64 %a, 1
+ ret i64 %and
+}
+
+ought to be implemented as:
+
+ lhi %r0, 1
+ ngr %r2, %r0
+ br %r14
+
+but two-address optimisations reverse the order of the AND and force:
+
+ lhi %r0, 1
+ ngr %r0, %r2
+ lgr %r2, %r0
+ br %r14
+
+CodeGen/SystemZ/and-04.ll has several examples of this.
+
+--
+
+Out-of-range displacements are usually handled by loading the full
+address into a register. In many cases it would be better to create
+an anchor point instead. E.g. for:
+
+define void @f4a(i128 *%aptr, i64 %base) {
+ %addr = add i64 %base, 524288
+ %bptr = inttoptr i64 %addr to i128 *
+ %a = load volatile i128 *%aptr
+ %b = load i128 *%bptr
+ %add = add i128 %a, %b
+ store i128 %add, i128 *%aptr
+ ret void
+}
+
+(from CodeGen/SystemZ/int-add-08.ll) we load %base+524288 and %base+524296
+into separate registers, rather than using %base+524288 as a base for both.
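+
+One possible anchor-based sequence (assuming %base is in %r3 and may be
+clobbered) would be:
+
+  agfi %r3, 524280
+  lg %r0, 8(%r3)
+  lg %r1, 16(%r3)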
+
+--
+
+Dynamic stack allocations round the size to 8 bytes and then allocate
+that rounded amount. It would be simpler to subtract the unrounded
+size from the copy of the stack pointer and then align the result.
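+In pseudo-C: "SP = (SP - Size) & -8" rather than "SP -= (Size + 7) & -8".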
+See CodeGen/SystemZ/alloca-01.ll for an example.
+
+--
+
+Atomic loads and stores use the default compare-and-swap based implementation.
+This is much too conservative in practice, since the architecture guarantees
+that 1-, 2-, 4- and 8-byte loads and stores to aligned addresses are
+inherently atomic.
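+
+For example, an aligned atomic i32 load could then usually be a plain L
+instruction (plus any fence needed for ordering) rather than a
+compare-and-swap loop.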
+
+--
+
+If needed, we can support 16-byte atomics using LPQ, STPQ and CSDG.
+
+--
+
+We might want to model all access registers and use them to spill
+32-bit values.
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.h b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
new file mode 100644
index 000000000000..dcebbad59071
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
@@ -0,0 +1,115 @@
+//==- SystemZ.h - Top-Level Interface for SystemZ representation -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in
+// the LLVM SystemZ backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZ_H
+#define SYSTEMZ_H
+
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+ class SystemZTargetMachine;
+ class FunctionPass;
+
+ namespace SystemZ {
+ // Condition-code mask values.
+ const unsigned CCMASK_0 = 1 << 3;
+ const unsigned CCMASK_1 = 1 << 2;
+ const unsigned CCMASK_2 = 1 << 1;
+ const unsigned CCMASK_3 = 1 << 0;
+ const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+ // Condition-code mask assignments for integer and floating-point
+ // comparisons.
+ const unsigned CCMASK_CMP_EQ = CCMASK_0;
+ const unsigned CCMASK_CMP_LT = CCMASK_1;
+ const unsigned CCMASK_CMP_GT = CCMASK_2;
+ const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT;
+ const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT;
+ const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT;
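+    // For example, CCMASK_CMP_LE accepts CC 0 (equal) and CC 1 (less than).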
+
+ // Condition-code mask assignments for floating-point comparisons only.
+ const unsigned CCMASK_CMP_UO = CCMASK_3;
+ const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO;
+
+ // All condition-code values produced by comparisons.
+ const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
+ const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+ // Condition-code mask assignments for CS.
+ const unsigned CCMASK_CS_EQ = CCMASK_0;
+ const unsigned CCMASK_CS_NE = CCMASK_1;
+ const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1;
+
+ // Condition-code mask assignments for a completed SRST loop.
+ const unsigned CCMASK_SRST_FOUND = CCMASK_1;
+ const unsigned CCMASK_SRST_NOTFOUND = CCMASK_2;
+ const unsigned CCMASK_SRST = CCMASK_1 | CCMASK_2;
+
+ // Condition-code mask assignments for TEST UNDER MASK.
+ const unsigned CCMASK_TM_ALL_0 = CCMASK_0;
+ const unsigned CCMASK_TM_MIXED_MSB_0 = CCMASK_1;
+ const unsigned CCMASK_TM_MIXED_MSB_1 = CCMASK_2;
+ const unsigned CCMASK_TM_ALL_1 = CCMASK_3;
+ const unsigned CCMASK_TM_SOME_0 = CCMASK_TM_ALL_1 ^ CCMASK_ANY;
+ const unsigned CCMASK_TM_SOME_1 = CCMASK_TM_ALL_0 ^ CCMASK_ANY;
+ const unsigned CCMASK_TM_MSB_0 = CCMASK_0 | CCMASK_1;
+ const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
+ const unsigned CCMASK_TM = CCMASK_ANY;
+
+ // The position of the low CC bit in an IPM result.
+ const unsigned IPM_CC = 28;
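+    // The CC can be recovered from an IPM result as (Value >> IPM_CC) & 3.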
+
+ // Mask assignments for PFD.
+ const unsigned PFD_READ = 1;
+ const unsigned PFD_WRITE = 2;
+
+ // Return true if Val fits an LLILL operand.
+ static inline bool isImmLL(uint64_t Val) {
+ return (Val & ~0x000000000000ffffULL) == 0;
+ }
+
+ // Return true if Val fits an LLILH operand.
+ static inline bool isImmLH(uint64_t Val) {
+ return (Val & ~0x00000000ffff0000ULL) == 0;
+ }
+
+ // Return true if Val fits an LLIHL operand.
+ static inline bool isImmHL(uint64_t Val) {
+    return (Val & ~0x0000ffff00000000ULL) == 0;
+ }
+
+ // Return true if Val fits an LLIHH operand.
+ static inline bool isImmHH(uint64_t Val) {
+ return (Val & ~0xffff000000000000ULL) == 0;
+ }
+
+ // Return true if Val fits an LLILF operand.
+ static inline bool isImmLF(uint64_t Val) {
+ return (Val & ~0x00000000ffffffffULL) == 0;
+ }
+
+ // Return true if Val fits an LLIHF operand.
+ static inline bool isImmHF(uint64_t Val) {
+ return (Val & ~0xffffffff00000000ULL) == 0;
+ }
+ }
+
+ FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+ FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
+ FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
+ FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
+} // end namespace llvm
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.td b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
new file mode 100644
index 000000000000..abf5c8eb320c
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
@@ -0,0 +1,72 @@
+//===-- SystemZ.td - Describe the SystemZ target machine -----*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// SystemZ supported processors and features
+//===----------------------------------------------------------------------===//
+
+include "SystemZProcessors.td"
+
+//===----------------------------------------------------------------------===//
+// Register file description
+//===----------------------------------------------------------------------===//
+
+include "SystemZRegisterInfo.td"
+
+//===----------------------------------------------------------------------===//
+// Calling convention description
+//===----------------------------------------------------------------------===//
+
+include "SystemZCallingConv.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction descriptions
+//===----------------------------------------------------------------------===//
+
+include "SystemZOperators.td"
+include "SystemZOperands.td"
+include "SystemZPatterns.td"
+include "SystemZInstrFormats.td"
+include "SystemZInstrInfo.td"
+include "SystemZInstrFP.td"
+
+def SystemZInstrInfo : InstrInfo {}
+
+//===----------------------------------------------------------------------===//
+// Assembly parser
+//===----------------------------------------------------------------------===//
+
+def SystemZAsmParser : AsmParser {
+ let ShouldEmitMatchRegisterName = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Assembly writer
+//===----------------------------------------------------------------------===//
+
+def SystemZAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ bit isMCAsmWriter = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-level target declaration
+//===----------------------------------------------------------------------===//
+
+def SystemZ : Target {
+ let InstructionSet = SystemZInstrInfo;
+ let AssemblyParsers = [SystemZAsmParser];
+ let AssemblyWriters = [SystemZAsmWriter];
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
new file mode 100644
index 000000000000..75cbda4958a2
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -0,0 +1,239 @@
+//===-- SystemZAsmPrinter.cpp - SystemZ LLVM assembly printer -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Streams SystemZ assembly language and associated data, in the form of
+// MCInsts and MCExprs respectively.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZAsmPrinter.h"
+#include "InstPrinter/SystemZInstPrinter.h"
+#include "SystemZConstantPoolValue.h"
+#include "SystemZMCInstLower.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInstBuilder.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/Mangler.h"
+
+using namespace llvm;
+
+// Return an RI instruction like MI with opcode Opcode, but with the
+// GR64 register operands turned into GR32s.
+static MCInst lowerRILow(const MachineInstr *MI, unsigned Opcode) {
+ if (MI->isCompare())
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(1).getImm());
+ else
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(1).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+}
+
+// Return an RI instruction like MI with opcode Opcode, but with the
+// GR64 register operands turned into GRH32s.
+static MCInst lowerRIHigh(const MachineInstr *MI, unsigned Opcode) {
+ if (MI->isCompare())
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(1).getImm());
+ else
+ return MCInstBuilder(Opcode)
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(0).getReg()))
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(1).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+}
+
+// Return an RIE-f instruction like MI with opcode Opcode, but with the
+// R2 register turned into a GR64.
+static MCInst lowerRIEfLow(const MachineInstr *MI, unsigned Opcode) {
+ return MCInstBuilder(Opcode)
+ .addReg(MI->getOperand(0).getReg())
+ .addReg(MI->getOperand(1).getReg())
+ .addReg(SystemZMC::getRegAsGR64(MI->getOperand(2).getReg()))
+ .addImm(MI->getOperand(3).getImm())
+ .addImm(MI->getOperand(4).getImm())
+ .addImm(MI->getOperand(5).getImm());
+}
+
+void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ SystemZMCInstLower Lower(MF->getContext(), *this);
+ MCInst LoweredMI;
+ switch (MI->getOpcode()) {
+ case SystemZ::Return:
+ LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R14D);
+ break;
+
+ case SystemZ::CallBRASL:
+ LoweredMI = MCInstBuilder(SystemZ::BRASL)
+ .addReg(SystemZ::R14D)
+ .addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_PLT));
+ break;
+
+ case SystemZ::CallBASR:
+ LoweredMI = MCInstBuilder(SystemZ::BASR)
+ .addReg(SystemZ::R14D)
+ .addReg(MI->getOperand(0).getReg());
+ break;
+
+ case SystemZ::CallJG:
+ LoweredMI = MCInstBuilder(SystemZ::JG)
+ .addExpr(Lower.getExpr(MI->getOperand(0), MCSymbolRefExpr::VK_PLT));
+ break;
+
+ case SystemZ::CallBR:
+ LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R1D);
+ break;
+
+ case SystemZ::IILF64:
+ LoweredMI = MCInstBuilder(SystemZ::IILF)
+ .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+ break;
+
+ case SystemZ::IIHF64:
+ LoweredMI = MCInstBuilder(SystemZ::IIHF)
+ .addReg(SystemZMC::getRegAsGRH32(MI->getOperand(0).getReg()))
+ .addImm(MI->getOperand(2).getImm());
+ break;
+
+ case SystemZ::RISBHH:
+ case SystemZ::RISBHL:
+ LoweredMI = lowerRIEfLow(MI, SystemZ::RISBHG);
+ break;
+
+ case SystemZ::RISBLH:
+ case SystemZ::RISBLL:
+ LoweredMI = lowerRIEfLow(MI, SystemZ::RISBLG);
+ break;
+
+#define LOWER_LOW(NAME) \
+ case SystemZ::NAME##64: LoweredMI = lowerRILow(MI, SystemZ::NAME); break
+
+ LOWER_LOW(IILL);
+ LOWER_LOW(IILH);
+ LOWER_LOW(TMLL);
+ LOWER_LOW(TMLH);
+ LOWER_LOW(NILL);
+ LOWER_LOW(NILH);
+ LOWER_LOW(NILF);
+ LOWER_LOW(OILL);
+ LOWER_LOW(OILH);
+ LOWER_LOW(OILF);
+ LOWER_LOW(XILF);
+
+#undef LOWER_LOW
+
+#define LOWER_HIGH(NAME) \
+ case SystemZ::NAME##64: LoweredMI = lowerRIHigh(MI, SystemZ::NAME); break
+
+ LOWER_HIGH(IIHL);
+ LOWER_HIGH(IIHH);
+ LOWER_HIGH(TMHL);
+ LOWER_HIGH(TMHH);
+ LOWER_HIGH(NIHL);
+ LOWER_HIGH(NIHH);
+ LOWER_HIGH(NIHF);
+ LOWER_HIGH(OIHL);
+ LOWER_HIGH(OIHH);
+ LOWER_HIGH(OIHF);
+ LOWER_HIGH(XIHF);
+
+#undef LOWER_HIGH
+
+ default:
+ Lower.lower(MI, LoweredMI);
+ break;
+ }
+ OutStreamer.EmitInstruction(LoweredMI);
+}
+
+// Convert a SystemZ-specific constant pool modifier into the associated
+// MCSymbolRefExpr variant kind.
+static MCSymbolRefExpr::VariantKind
+getModifierVariantKind(SystemZCP::SystemZCPModifier Modifier) {
+ switch (Modifier) {
+ case SystemZCP::NTPOFF: return MCSymbolRefExpr::VK_NTPOFF;
+ }
+ llvm_unreachable("Invalid SystemCPModifier!");
+}
+
+void SystemZAsmPrinter::
+EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
+ SystemZConstantPoolValue *ZCPV =
+ static_cast<SystemZConstantPoolValue*>(MCPV);
+
+ const MCExpr *Expr =
+ MCSymbolRefExpr::Create(getSymbol(ZCPV->getGlobalValue()),
+ getModifierVariantKind(ZCPV->getModifier()),
+ OutContext);
+ uint64_t Size = TM.getDataLayout()->getTypeAllocSize(ZCPV->getType());
+
+ OutStreamer.EmitValue(Expr, Size);
+}
+
+bool SystemZAsmPrinter::PrintAsmOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ if (ExtraCode && *ExtraCode == 'n') {
+ if (!MI->getOperand(OpNo).isImm())
+ return true;
+ OS << -int64_t(MI->getOperand(OpNo).getImm());
+ } else {
+ SystemZMCInstLower Lower(MF->getContext(), *this);
+ MCOperand MO(Lower.lowerOperand(MI->getOperand(OpNo)));
+ SystemZInstPrinter::printOperand(MO, OS);
+ }
+ return false;
+}
+
+bool SystemZAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ SystemZInstPrinter::printAddress(MI->getOperand(OpNo).getReg(),
+ MI->getOperand(OpNo + 1).getImm(),
+ MI->getOperand(OpNo + 2).getReg(), OS);
+ return false;
+}
+
+void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
+ if (Subtarget->isTargetELF()) {
+ const TargetLoweringObjectFileELF &TLOFELF =
+ static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
+
+ MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
+
+ // Output stubs for external and common global variables.
+ MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList();
+ if (!Stubs.empty()) {
+ OutStreamer.SwitchSection(TLOFELF.getDataRelSection());
+ const DataLayout *TD = TM.getDataLayout();
+
+ for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
+ OutStreamer.EmitLabel(Stubs[i].first);
+ OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
+ TD->getPointerSize(0));
+ }
+ Stubs.clear();
+ }
+ }
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeSystemZAsmPrinter() {
+ RegisterAsmPrinter<SystemZAsmPrinter> X(TheSystemZTarget);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
new file mode 100644
index 000000000000..4b6c51b6f0b3
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
@@ -0,0 +1,52 @@
+//===-- SystemZAsmPrinter.h - SystemZ LLVM assembly printer ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZASMPRINTER_H
+#define SYSTEMZASMPRINTER_H
+
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCStreamer;
+class MachineBasicBlock;
+class MachineInstr;
+class Module;
+class raw_ostream;
+
+class LLVM_LIBRARY_VISIBILITY SystemZAsmPrinter : public AsmPrinter {
+private:
+ const SystemZSubtarget *Subtarget;
+
+public:
+ SystemZAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
+ : AsmPrinter(TM, Streamer) {
+ Subtarget = &TM.getSubtarget<SystemZSubtarget>();
+ }
+
+ // Override AsmPrinter.
+ virtual const char *getPassName() const LLVM_OVERRIDE {
+ return "SystemZ Assembly Printer";
+ }
+ virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV)
+ LLVM_OVERRIDE;
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) LLVM_OVERRIDE;
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) LLVM_OVERRIDE;
+ virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.cpp
new file mode 100644
index 000000000000..cc9c84b6a058
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.cpp
@@ -0,0 +1,21 @@
+//===-- SystemZCallingConv.cpp - Calling conventions for SystemZ ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZCallingConv.h"
+#include "SystemZRegisterInfo.h"
+
+using namespace llvm;
+
+const unsigned SystemZ::ArgGPRs[SystemZ::NumArgGPRs] = {
+ SystemZ::R2D, SystemZ::R3D, SystemZ::R4D, SystemZ::R5D, SystemZ::R6D
+};
+
+const unsigned SystemZ::ArgFPRs[SystemZ::NumArgFPRs] = {
+ SystemZ::F0D, SystemZ::F2D, SystemZ::F4D, SystemZ::F6D
+};
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
new file mode 100644
index 000000000000..298985e7e514
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
@@ -0,0 +1,23 @@
+//===-- SystemZCallingConv.h - Calling conventions for SystemZ --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZCALLINGCONV_H
+#define SYSTEMZCALLINGCONV_H
+
+namespace llvm {
+ namespace SystemZ {
+ const unsigned NumArgGPRs = 5;
+ extern const unsigned ArgGPRs[NumArgGPRs];
+
+ const unsigned NumArgFPRs = 4;
+ extern const unsigned ArgFPRs[NumArgFPRs];
+ }
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
new file mode 100644
index 000000000000..c4f641e7bdec
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
@@ -0,0 +1,65 @@
+//=- SystemZCallingConv.td - Calling conventions for SystemZ -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This describes the calling conventions for the SystemZ ABI.
+//===----------------------------------------------------------------------===//
+
+class CCIfExtend<CCAction A>
+ : CCIf<"ArgFlags.isSExt() || ArgFlags.isZExt()", A>;
+
+//===----------------------------------------------------------------------===//
+// SVR4 return value calling convention
+//===----------------------------------------------------------------------===//
+def RetCC_SystemZ : CallingConv<[
+ // Promote i32 to i64 if it has an explicit extension type.
+ CCIfType<[i32], CCIfExtend<CCPromoteToType<i64>>>,
+
+ // ABI-compliant code returns 64-bit integers in R2. Make the other
+ // call-clobbered argument registers available for code that doesn't
+ // care about the ABI. (R6 is an argument register too, but is
+ // call-saved and therefore not suitable for return values.)
+ CCIfType<[i32], CCAssignToReg<[R2L, R3L, R4L, R5L]>>,
+ CCIfType<[i64], CCAssignToReg<[R2D, R3D, R4D, R5D]>>,
+
+  // ABI-compliant code returns float and double in F0. Make the
+ // other floating-point argument registers available for code that
+ // doesn't care about the ABI. All floating-point argument registers
+ // are call-clobbered, so we can use all of them here.
+ CCIfType<[f32], CCAssignToReg<[F0S, F2S, F4S, F6S]>>,
+ CCIfType<[f64], CCAssignToReg<[F0D, F2D, F4D, F6D]>>
+
+ // ABI-compliant code returns long double by reference, but that conversion
+ // is left to higher-level code. Perhaps we could add an f128 definition
+ // here for code that doesn't care about the ABI?
+]>;
+
+//===----------------------------------------------------------------------===//
+// SVR4 argument calling conventions
+//===----------------------------------------------------------------------===//
+def CC_SystemZ : CallingConv<[
+ // Promote i32 to i64 if it has an explicit extension type.
+ // The convention is that true integer arguments that are smaller
+ // than 64 bits should be marked as extended, but structures that
+ // are smaller than 64 bits shouldn't.
+ CCIfType<[i32], CCIfExtend<CCPromoteToType<i64>>>,
+
+ // Force long double values to the stack and pass i64 pointers to them.
+ CCIfType<[f128], CCPassIndirect<i64>>,
+
+ // The first 5 integer arguments are passed in R2-R6. Note that R6
+ // is call-saved.
+ CCIfType<[i32], CCAssignToReg<[R2L, R3L, R4L, R5L, R6L]>>,
+ CCIfType<[i64], CCAssignToReg<[R2D, R3D, R4D, R5D, R6D]>>,
+
+ // The first 4 float and double arguments are passed in even registers F0-F6.
+ CCIfType<[f32], CCAssignToReg<[F0S, F2S, F4S, F6S]>>,
+ CCIfType<[f64], CCAssignToReg<[F0D, F2D, F4D, F6D]>>,
+
+ // Other arguments are passed in 8-byte-aligned 8-byte stack slots.
+ CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
+]>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
new file mode 100644
index 000000000000..6c7081169a96
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -0,0 +1,62 @@
+//===-- SystemZConstantPoolValue.cpp - SystemZ constant-pool value --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZConstantPoolValue.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+SystemZConstantPoolValue::
+SystemZConstantPoolValue(const GlobalValue *gv,
+ SystemZCP::SystemZCPModifier modifier)
+ : MachineConstantPoolValue(gv->getType()), GV(gv), Modifier(modifier) {}
+
+SystemZConstantPoolValue *
+SystemZConstantPoolValue::Create(const GlobalValue *GV,
+ SystemZCP::SystemZCPModifier Modifier) {
+ return new SystemZConstantPoolValue(GV, Modifier);
+}
+
+unsigned SystemZConstantPoolValue::getRelocationInfo() const {
+ switch (Modifier) {
+ case SystemZCP::NTPOFF:
+ // May require a relocation, but the relocations are always resolved
+ // by the static linker.
+ return 1;
+ }
+ llvm_unreachable("Unknown modifier");
+}
+
+int SystemZConstantPoolValue::
+getExistingMachineCPValue(MachineConstantPool *CP, unsigned Alignment) {
+ unsigned AlignMask = Alignment - 1;
+ const std::vector<MachineConstantPoolEntry> &Constants = CP->getConstants();
+ for (unsigned I = 0, E = Constants.size(); I != E; ++I) {
+ if (Constants[I].isMachineConstantPoolEntry() &&
+ (Constants[I].getAlignment() & AlignMask) == 0) {
+ SystemZConstantPoolValue *ZCPV =
+ static_cast<SystemZConstantPoolValue *>(Constants[I].Val.MachineCPVal);
+ if (ZCPV->GV == GV && ZCPV->Modifier == Modifier)
+ return I;
+ }
+ }
+ return -1;
+}
+
+void SystemZConstantPoolValue::addSelectionDAGCSEId(FoldingSetNodeID &ID) {
+ ID.AddPointer(GV);
+ ID.AddInteger(Modifier);
+}
+
+void SystemZConstantPoolValue::print(raw_ostream &O) const {
+ O << GV << "@" << int(Modifier);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
new file mode 100644
index 000000000000..9927bdb262c4
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
@@ -0,0 +1,55 @@
+//===- SystemZConstantPoolValue.h - SystemZ constant-pool value -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZCONSTANTPOOLVALUE_H
+#define SYSTEMZCONSTANTPOOLVALUE_H
+
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+class GlobalValue;
+
+namespace SystemZCP {
+ enum SystemZCPModifier {
+ NTPOFF
+ };
+}
+
+/// A SystemZ-specific constant pool value. At present, the only
+/// defined constant pool values are offsets of thread-local variables
+/// (written x@NTPOFF).
+class SystemZConstantPoolValue : public MachineConstantPoolValue {
+ const GlobalValue *GV;
+ SystemZCP::SystemZCPModifier Modifier;
+
+protected:
+ SystemZConstantPoolValue(const GlobalValue *GV,
+ SystemZCP::SystemZCPModifier Modifier);
+
+public:
+ static SystemZConstantPoolValue *
+ Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier);
+
+ // Override MachineConstantPoolValue.
+ virtual unsigned getRelocationInfo() const LLVM_OVERRIDE;
+ virtual int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) LLVM_OVERRIDE;
+ virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) LLVM_OVERRIDE;
+ virtual void print(raw_ostream &O) const LLVM_OVERRIDE;
+
+ // Access SystemZ-specific fields.
+ const GlobalValue *getGlobalValue() const { return GV; }
+ SystemZCP::SystemZCPModifier getModifier() const { return Modifier; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
new file mode 100644
index 000000000000..b8a77db0f845
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -0,0 +1,471 @@
+//===-- SystemZElimCompare.cpp - Eliminate comparison instructions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass:
+// (1) tries to remove compares if CC already contains the required information
+// (2) fuses compares and branches into COMPARE AND BRANCH instructions
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-elim-compare"
+
+#include "SystemZTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+STATISTIC(BranchOnCounts, "Number of branch-on-count instructions");
+STATISTIC(EliminatedComparisons, "Number of eliminated comparisons");
+STATISTIC(FusedComparisons, "Number of fused compare-and-branch instructions");
+
+namespace {
+ // Represents the references to a particular register in one or more
+ // instructions.
+ struct Reference {
+ Reference()
+ : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {}
+
+ Reference &operator|=(const Reference &Other) {
+ Def |= Other.Def;
+ IndirectDef |= Other.IndirectDef;
+ Use |= Other.Use;
+ IndirectUse |= Other.IndirectUse;
+ return *this;
+ }
+
+ operator bool() const { return Def || Use; }
+
+ // True if the register is defined or used in some form, either directly or
+ // via a sub- or super-register.
+ bool Def;
+ bool Use;
+
+ // True if the register is defined or used indirectly, by a sub- or
+ // super-register.
+ bool IndirectDef;
+ bool IndirectUse;
+ };
+
+ class SystemZElimCompare : public MachineFunctionPass {
+ public:
+ static char ID;
+ SystemZElimCompare(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(0), TRI(0) {}
+
+ virtual const char *getPassName() const {
+ return "SystemZ Comparison Elimination";
+ }
+
+ bool processBlock(MachineBasicBlock *MBB);
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ Reference getRegReferences(MachineInstr *MI, unsigned Reg);
+ bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool convertToLoadAndTest(MachineInstr *MI);
+ bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool optimizeCompareZero(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool fuseCompareAndBranch(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+
+ const SystemZInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ };
+
+ char SystemZElimCompare::ID = 0;
+} // end of anonymous namespace
+
+FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) {
+ return new SystemZElimCompare(TM);
+}
+
+// Return true if CC is live out of MBB.
+static bool isCCLiveOut(MachineBasicBlock *MBB) {
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI)
+ if ((*SI)->isLiveIn(SystemZ::CC))
+ return true;
+ return false;
+}
+
+// Return true if any CC result of MI would reflect the value of subreg
+// SubReg of Reg.
+static bool resultTests(MachineInstr *MI, unsigned Reg, unsigned SubReg) {
+ if (MI->getNumOperands() > 0 &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(0).isDef() &&
+ MI->getOperand(0).getReg() == Reg &&
+ MI->getOperand(0).getSubReg() == SubReg)
+ return true;
+
+ switch (MI->getOpcode()) {
+ case SystemZ::LR:
+ case SystemZ::LGR:
+ case SystemZ::LGFR:
+ case SystemZ::LTR:
+ case SystemZ::LTGR:
+ case SystemZ::LTGFR:
+ case SystemZ::LER:
+ case SystemZ::LDR:
+ case SystemZ::LXR:
+ case SystemZ::LTEBR:
+ case SystemZ::LTDBR:
+ case SystemZ::LTXBR:
+ if (MI->getOperand(1).getReg() == Reg &&
+ MI->getOperand(1).getSubReg() == SubReg)
+ return true;
+ }
+
+ return false;
+}
+
+// Describe the references to Reg in MI, including sub- and super-registers.
+Reference SystemZElimCompare::getRegReferences(MachineInstr *MI, unsigned Reg) {
+ Reference Ref;
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ if (MO.isReg()) {
+ if (unsigned MOReg = MO.getReg()) {
+ if (MOReg == Reg || TRI->regsOverlap(MOReg, Reg)) {
+ if (MO.isUse()) {
+ Ref.Use = true;
+ Ref.IndirectUse |= (MOReg != Reg);
+ }
+ if (MO.isDef()) {
+ Ref.Def = true;
+ Ref.IndirectDef |= (MOReg != Reg);
+ }
+ }
+ }
+ }
+ }
+ return Ref;
+}
+
+// Compare is a comparison of the result of MI against zero. If MI is an
+// addition of -1 and CCUsers consists of a single branch on nonzero,
+// eliminate the addition and convert the branch to a BRCT(G). Return
+// true on success.
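+//
+// An illustrative before/after, with hypothetical register choices:
+//
+//   AHI  %r2, -1          ; MI: add -1, sets CC
+//   CHI  %r2, 0           ; Compare: test against zero
+//   JLH  .Lloop           ; branch on nonzero
+// =>
+//   BRCT %r2, .Lloop      ; decrement and branch on nonzero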
+bool
+SystemZElimCompare::convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ // Check whether we have an addition of -1.
+ unsigned Opcode = MI->getOpcode();
+ unsigned BRCT;
+ if (Opcode == SystemZ::AHI)
+ BRCT = SystemZ::BRCT;
+ else if (Opcode == SystemZ::AGHI)
+ BRCT = SystemZ::BRCTG;
+ else
+ return false;
+ if (MI->getOperand(2).getImm() != -1)
+ return false;
+
+ // Check whether we have a single JLH.
+ if (CCUsers.size() != 1)
+ return false;
+ MachineInstr *Branch = CCUsers[0];
+ if (Branch->getOpcode() != SystemZ::BRC ||
+ Branch->getOperand(0).getImm() != SystemZ::CCMASK_ICMP ||
+ Branch->getOperand(1).getImm() != SystemZ::CCMASK_CMP_NE)
+ return false;
+
+ // We already know that there are no references to the register between
+ // MI and Compare. Make sure that there are also no references between
+ // Compare and Branch.
+ unsigned SrcReg = Compare->getOperand(0).getReg();
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
+ for (++MBBI; MBBI != MBBE; ++MBBI)
+ if (getRegReferences(MBBI, SrcReg))
+ return false;
+
+ // The transformation is OK. Rebuild Branch as a BRCT(G).
+ MachineOperand Target(Branch->getOperand(2));
+ Branch->RemoveOperand(2);
+ Branch->RemoveOperand(1);
+ Branch->RemoveOperand(0);
+ Branch->setDesc(TII->get(BRCT));
+ MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1))
+ .addOperand(Target)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ MI->removeFromParent();
+ return true;
+}
+
+// If MI is a load instruction, try to convert it into a LOAD AND TEST.
+// Return true on success.
+bool SystemZElimCompare::convertToLoadAndTest(MachineInstr *MI) {
+ unsigned Opcode = TII->getLoadAndTest(MI->getOpcode());
+ if (!Opcode)
+ return false;
+
+ MI->setDesc(TII->get(Opcode));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
+ return true;
+}
+
+// The CC users in CCUsers are testing the result of a comparison of some
+// value X against zero and we know that any CC value produced by MI
+// would also reflect the value of X. Try to adjust CCUsers so that
+// they test the result of MI directly, returning true on success.
+// Leave everything unchanged on failure.
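+//
+// A sketch of the idea (register choices and the reusable condition are
+// illustrative):
+//
+//   SR   %r2, %r3                        ; MI: subtract, sets CC
+//   CHI  %r2, 0                          ; Compare
+//   BRC  CCMASK_ICMP, CCMASK_CMP_LT, ...
+//
+// If getCompareZeroCCMask reports that the "less than zero" condition of
+// the SR is reusable, the BRC's CCValid/CCMask immediates are remapped to
+// SR's CC values and the CHI can then be deleted by the caller.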
+bool SystemZElimCompare::
+adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ int Opcode = MI->getOpcode();
+ const MCInstrDesc &Desc = TII->get(Opcode);
+ unsigned MIFlags = Desc.TSFlags;
+
+ // See which compare-style condition codes are available.
+ unsigned ReusableCCMask = SystemZII::getCompareZeroCCMask(MIFlags);
+
+ // For unsigned comparisons with zero, only equality makes sense.
+ unsigned CompareFlags = Compare->getDesc().TSFlags;
+ if (CompareFlags & SystemZII::IsLogical)
+ ReusableCCMask &= SystemZ::CCMASK_CMP_EQ;
+
+ if (ReusableCCMask == 0)
+ return false;
+
+ unsigned CCValues = SystemZII::getCCValues(MIFlags);
+ assert((ReusableCCMask & ~CCValues) == 0 && "Invalid CCValues");
+
+ // Now check whether these flags are enough for all users.
+ SmallVector<MachineOperand *, 4> AlterMasks;
+  for (unsigned I = 0, E = CCUsers.size(); I != E; ++I) {
+    MachineInstr *CCUserMI = CCUsers[I];
+
+    // Fail if this isn't a use of CC that we understand.
+    unsigned Flags = CCUserMI->getDesc().TSFlags;
+    unsigned FirstOpNum;
+    if (Flags & SystemZII::CCMaskFirst)
+      FirstOpNum = 0;
+    else if (Flags & SystemZII::CCMaskLast)
+      FirstOpNum = CCUserMI->getNumExplicitOperands() - 2;
+    else
+      return false;
+
+    // Check whether the instruction predicate treats all CC values
+    // outside of ReusableCCMask in the same way. In that case it
+    // doesn't matter what those CC values mean.
+    unsigned CCValid = CCUserMI->getOperand(FirstOpNum).getImm();
+    unsigned CCMask = CCUserMI->getOperand(FirstOpNum + 1).getImm();
+    unsigned OutValid = ~ReusableCCMask & CCValid;
+    unsigned OutMask = ~ReusableCCMask & CCMask;
+    if (OutMask != 0 && OutMask != OutValid)
+      return false;
+
+    AlterMasks.push_back(&CCUserMI->getOperand(FirstOpNum));
+    AlterMasks.push_back(&CCUserMI->getOperand(FirstOpNum + 1));
+  }
+
+ // All users are OK. Adjust the masks for MI.
+ for (unsigned I = 0, E = AlterMasks.size(); I != E; I += 2) {
+ AlterMasks[I]->setImm(CCValues);
+ unsigned CCMask = AlterMasks[I + 1]->getImm();
+ if (CCMask & ~ReusableCCMask)
+ AlterMasks[I + 1]->setImm((CCMask & ReusableCCMask) |
+ (CCValues & ~ReusableCCMask));
+ }
+
+ // CC is now live after MI.
+ int CCDef = MI->findRegisterDefOperandIdx(SystemZ::CC, false, true, TRI);
+ assert(CCDef >= 0 && "Couldn't find CC set");
+ MI->getOperand(CCDef).setIsDead(false);
+
+ // Clear any intervening kills of CC.
+ MachineBasicBlock::iterator MBBI = MI, MBBE = Compare;
+ for (++MBBI; MBBI != MBBE; ++MBBI)
+ MBBI->clearRegisterKills(SystemZ::CC, TRI);
+
+ return true;
+}
+
+// Return true if Compare is a comparison against zero.
+static bool isCompareZero(MachineInstr *Compare) {
+ switch (Compare->getOpcode()) {
+ case SystemZ::LTEBRCompare:
+ case SystemZ::LTDBRCompare:
+ case SystemZ::LTXBRCompare:
+ return true;
+
+ default:
+ return (Compare->getNumExplicitOperands() == 2 &&
+ Compare->getOperand(1).isImm() &&
+ Compare->getOperand(1).getImm() == 0);
+ }
+}
+
+// Try to optimize cases where comparison instruction Compare is testing
+// a value against zero. Return true on success, in which case the caller
+// should delete Compare as dead. CCUsers is the list of instructions
+// that use the CC value produced by Compare.
+bool SystemZElimCompare::
+optimizeCompareZero(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ if (!isCompareZero(Compare))
+ return false;
+
+ // Search back for CC results that are based on the first operand.
+ unsigned SrcReg = Compare->getOperand(0).getReg();
+ unsigned SrcSubReg = Compare->getOperand(0).getSubReg();
+ MachineBasicBlock *MBB = Compare->getParent();
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB->begin();
+ Reference CCRefs;
+ Reference SrcRefs;
+ while (MBBI != MBBE) {
+ --MBBI;
+ MachineInstr *MI = MBBI;
+ if (resultTests(MI, SrcReg, SrcSubReg)) {
+ // Try to remove both MI and Compare by converting a branch to BRCT(G).
+ // We don't care in this case whether CC is modified between MI and
+ // Compare.
+ if (!CCRefs.Use && !SrcRefs && convertToBRCT(MI, Compare, CCUsers)) {
+ BranchOnCounts += 1;
+ return true;
+ }
+ // Try to eliminate Compare by reusing a CC result from MI.
+ if ((!CCRefs && convertToLoadAndTest(MI)) ||
+ (!CCRefs.Def && adjustCCMasksForInstr(MI, Compare, CCUsers))) {
+ EliminatedComparisons += 1;
+ return true;
+ }
+ }
+ SrcRefs |= getRegReferences(MI, SrcReg);
+ if (SrcRefs.Def)
+ return false;
+ CCRefs |= getRegReferences(MI, SystemZ::CC);
+ if (CCRefs.Use && CCRefs.Def)
+ return false;
+ }
+ return false;
+}
+
+// Try to fuse comparison instruction Compare into a later branch.
+// Return true on success, in which case Compare is redundant and can
+// be deleted by the caller.
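+//
+// For example (illustrative):
+//
+//   CGR  %r2, %r3
+//   BRC  CCMASK_ICMP, CCMASK_CMP_LT, .Ltarget
+// =>
+//   CGRJ %r2, %r3, CCMASK_CMP_LT, .Ltarget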
+bool SystemZElimCompare::
+fuseCompareAndBranch(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers) {
+ // See whether we have a comparison that can be fused.
+ unsigned FusedOpcode = TII->getCompareAndBranch(Compare->getOpcode(),
+ Compare);
+ if (!FusedOpcode)
+ return false;
+
+ // See whether we have a single branch with which to fuse.
+ if (CCUsers.size() != 1)
+ return false;
+ MachineInstr *Branch = CCUsers[0];
+ if (Branch->getOpcode() != SystemZ::BRC)
+ return false;
+
+ // Make sure that the operands are available at the branch.
+ unsigned SrcReg = Compare->getOperand(0).getReg();
+ unsigned SrcReg2 = (Compare->getOperand(1).isReg() ?
+ Compare->getOperand(1).getReg() : 0);
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
+ for (++MBBI; MBBI != MBBE; ++MBBI)
+ if (MBBI->modifiesRegister(SrcReg, TRI) ||
+ (SrcReg2 && MBBI->modifiesRegister(SrcReg2, TRI)))
+ return false;
+
+ // Read the branch mask and target.
+ MachineOperand CCMask(MBBI->getOperand(1));
+ MachineOperand Target(MBBI->getOperand(2));
+ assert((CCMask.getImm() & ~SystemZ::CCMASK_ICMP) == 0 &&
+ "Invalid condition-code mask for integer comparison");
+
+ // Clear out all current operands.
+ int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI);
+ assert(CCUse >= 0 && "BRC must use CC");
+ Branch->RemoveOperand(CCUse);
+ Branch->RemoveOperand(2);
+ Branch->RemoveOperand(1);
+ Branch->RemoveOperand(0);
+
+ // Rebuild Branch as a fused compare and branch.
+ Branch->setDesc(TII->get(FusedOpcode));
+ MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
+ .addOperand(Compare->getOperand(0))
+ .addOperand(Compare->getOperand(1))
+ .addOperand(CCMask)
+ .addOperand(Target)
+ .addReg(SystemZ::CC, RegState::ImplicitDefine);
+
+ // Clear any intervening kills of SrcReg and SrcReg2.
+ MBBI = Compare;
+ for (++MBBI; MBBI != MBBE; ++MBBI) {
+ MBBI->clearRegisterKills(SrcReg, TRI);
+ if (SrcReg2)
+ MBBI->clearRegisterKills(SrcReg2, TRI);
+ }
+ FusedComparisons += 1;
+ return true;
+}
+
+// Process all comparison instructions in MBB. Return true if something
+// changed.
+bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) {
+ bool Changed = false;
+
+ // Walk backwards through the block looking for comparisons, recording
+ // all CC users as we go. The subroutines can delete Compare and
+ // instructions before it.
+ bool CompleteCCUsers = !isCCLiveOut(MBB);
+ SmallVector<MachineInstr *, 4> CCUsers;
+ MachineBasicBlock::iterator MBBI = MBB->end();
+ while (MBBI != MBB->begin()) {
+ MachineInstr *MI = --MBBI;
+ if (CompleteCCUsers &&
+ MI->isCompare() &&
+ (optimizeCompareZero(MI, CCUsers) ||
+ fuseCompareAndBranch(MI, CCUsers))) {
+ ++MBBI;
+ MI->removeFromParent();
+ Changed = true;
+ CCUsers.clear();
+ CompleteCCUsers = true;
+ continue;
+ }
+
+ Reference CCRefs(getRegReferences(MI, SystemZ::CC));
+ if (CCRefs.Def) {
+ CCUsers.clear();
+ CompleteCCUsers = !CCRefs.IndirectDef;
+ }
+ if (CompleteCCUsers && CCRefs.Use)
+ CCUsers.push_back(MI);
+ }
+ return Changed;
+}
+
+bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
+ TRI = &TII->getRegisterInfo();
+
+ bool Changed = false;
+ for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
+ MFI != MFE; ++MFI)
+ Changed |= processBlock(MFI);
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
new file mode 100644
index 000000000000..acfb491b953b
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -0,0 +1,521 @@
+//===-- SystemZFrameLowering.cpp - Frame lowering for SystemZ -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZFrameLowering.h"
+#include "SystemZCallingConv.h"
+#include "SystemZInstrBuilder.h"
+#include "SystemZMachineFunctionInfo.h"
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/IR/Function.h"
+
+using namespace llvm;
+
+namespace {
+ // The ABI-defined register save slots, relative to the incoming stack
+ // pointer.
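+  //
+  // Per the s390x ELF ABI, bytes 0-7 of the 160-byte caller-allocated
+  // area hold the back chain and bytes 8-15 are reserved; %r2-%r15 are
+  // saved at bytes 16-127 and %f0/%f2/%f4/%f6 at bytes 128-159, matching
+  // the offsets below.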
+ static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
+ { SystemZ::R2D, 0x10 },
+ { SystemZ::R3D, 0x18 },
+ { SystemZ::R4D, 0x20 },
+ { SystemZ::R5D, 0x28 },
+ { SystemZ::R6D, 0x30 },
+ { SystemZ::R7D, 0x38 },
+ { SystemZ::R8D, 0x40 },
+ { SystemZ::R9D, 0x48 },
+ { SystemZ::R10D, 0x50 },
+ { SystemZ::R11D, 0x58 },
+ { SystemZ::R12D, 0x60 },
+ { SystemZ::R13D, 0x68 },
+ { SystemZ::R14D, 0x70 },
+ { SystemZ::R15D, 0x78 },
+ { SystemZ::F0D, 0x80 },
+ { SystemZ::F2D, 0x88 },
+ { SystemZ::F4D, 0x90 },
+ { SystemZ::F6D, 0x98 }
+ };
+}
+
+SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm,
+ const SystemZSubtarget &sti)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
+ -SystemZMC::CallFrameSize, 8),
+ TM(tm), STI(sti) {
+ // Create a mapping from register number to save slot offset.
+ RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
+ for (unsigned I = 0, E = array_lengthof(SpillOffsetTable); I != E; ++I)
+ RegSpillOffsets[SpillOffsetTable[I].Reg] = SpillOffsetTable[I].Offset;
+}
+
+const TargetFrameLowering::SpillSlot *
+SystemZFrameLowering::getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+ NumEntries = array_lengthof(SpillOffsetTable);
+ return SpillOffsetTable;
+}
+
+void SystemZFrameLowering::
+processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFFrame = MF.getFrameInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ bool HasFP = hasFP(MF);
+ SystemZMachineFunctionInfo *MFI = MF.getInfo<SystemZMachineFunctionInfo>();
+ bool IsVarArg = MF.getFunction()->isVarArg();
+
+ // va_start stores incoming FPR varargs in the normal way, but delegates
+ // the saving of incoming GPR varargs to spillCalleeSavedRegisters().
+ // Record these pending uses, which typically include the call-saved
+ // argument register R6D.
+ if (IsVarArg)
+ for (unsigned I = MFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
+ MRI.setPhysRegUsed(SystemZ::ArgGPRs[I]);
+
+ // If the function requires a frame pointer, record that the hard
+ // frame pointer will be clobbered.
+ if (HasFP)
+ MRI.setPhysRegUsed(SystemZ::R11D);
+
+ // If the function calls other functions, record that the return
+ // address register will be clobbered.
+ if (MFFrame->hasCalls())
+ MRI.setPhysRegUsed(SystemZ::R14D);
+
+ // If we are saving GPRs other than the stack pointer, we might as well
+ // save and restore the stack pointer at the same time, via STMG and LMG.
+ // This allows the deallocation to be done by the LMG, rather than needing
+ // a separate %r15 addition.
+ const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
+ for (unsigned I = 0; CSRegs[I]; ++I) {
+ unsigned Reg = CSRegs[I];
+ if (SystemZ::GR64BitRegClass.contains(Reg) && MRI.isPhysRegUsed(Reg)) {
+ MRI.setPhysRegUsed(SystemZ::R15D);
+ break;
+ }
+ }
+}
+
+// Add GPR64 to the save instruction being built by MIB, which is in basic
+// block MBB. IsImplicit says whether this is an explicit operand to the
+// instruction, or an implicit one that comes between the explicit start
+// and end registers.
+static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
+ const SystemZTargetMachine &TM,
+ unsigned GPR64, bool IsImplicit) {
+ const SystemZRegisterInfo *RI = TM.getRegisterInfo();
+ unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
+ bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
+ if (!IsLive || !IsImplicit) {
+ MIB.addReg(GPR64, getImplRegState(IsImplicit) | getKillRegState(!IsLive));
+ if (!IsLive)
+ MBB.addLiveIn(GPR64);
+ }
+}
+
+bool SystemZFrameLowering::
+spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
+ bool IsVarArg = MF.getFunction()->isVarArg();
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Scan the call-saved GPRs and find the bounds of the register spill area.
+ unsigned LowGPR = 0;
+ unsigned HighGPR = SystemZ::R15D;
+ unsigned StartOffset = -1U;
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ unsigned Reg = CSI[I].getReg();
+ if (SystemZ::GR64BitRegClass.contains(Reg)) {
+ unsigned Offset = RegSpillOffsets[Reg];
+ assert(Offset && "Unexpected GPR save");
+ if (StartOffset > Offset) {
+ LowGPR = Reg;
+ StartOffset = Offset;
+ }
+ }
+ }
+
+ // Save the range of call-saved registers, for use by the epilogue inserter.
+ ZFI->setLowSavedGPR(LowGPR);
+ ZFI->setHighSavedGPR(HighGPR);
+
+ // Include the GPR varargs, if any. R6D is call-saved, so would
+ // be included by the loop above, but we also need to handle the
+ // call-clobbered argument registers.
+ if (IsVarArg) {
+ unsigned FirstGPR = ZFI->getVarArgsFirstGPR();
+ if (FirstGPR < SystemZ::NumArgGPRs) {
+ unsigned Reg = SystemZ::ArgGPRs[FirstGPR];
+ unsigned Offset = RegSpillOffsets[Reg];
+ if (StartOffset > Offset) {
+        LowGPR = Reg;
+        StartOffset = Offset;
+ }
+ }
+ }
+
+  // Save GPRs.
+ if (LowGPR) {
+ assert(LowGPR != HighGPR && "Should be saving %r15 and something else");
+
+ // Build an STMG instruction.
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));
+
+ // Add the explicit register operands.
+ addSavedGPR(MBB, MIB, TM, LowGPR, false);
+ addSavedGPR(MBB, MIB, TM, HighGPR, false);
+
+ // Add the address.
+ MIB.addReg(SystemZ::R15D).addImm(StartOffset);
+
+ // Make sure all call-saved GPRs are included as operands and are
+ // marked as live on entry.
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ unsigned Reg = CSI[I].getReg();
+ if (SystemZ::GR64BitRegClass.contains(Reg))
+ addSavedGPR(MBB, MIB, TM, Reg, true);
+ }
+
+ // ...likewise GPR varargs.
+ if (IsVarArg)
+ for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
+ addSavedGPR(MBB, MIB, TM, SystemZ::ArgGPRs[I], true);
+ }
+
+ // Save FPRs in the normal TargetInstrInfo way.
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ unsigned Reg = CSI[I].getReg();
+ if (SystemZ::FP64BitRegClass.contains(Reg)) {
+ MBB.addLiveIn(Reg);
+ TII->storeRegToStackSlot(MBB, MBBI, Reg, true, CSI[I].getFrameIdx(),
+ &SystemZ::FP64BitRegClass, TRI);
+ }
+ }
+
+ return true;
+}
+
+bool SystemZFrameLowering::
+restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
+ SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
+ bool HasFP = hasFP(MF);
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Restore FPRs in the normal TargetInstrInfo way.
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ unsigned Reg = CSI[I].getReg();
+ if (SystemZ::FP64BitRegClass.contains(Reg))
+ TII->loadRegFromStackSlot(MBB, MBBI, Reg, CSI[I].getFrameIdx(),
+ &SystemZ::FP64BitRegClass, TRI);
+ }
+
+ // Restore call-saved GPRs (but not call-clobbered varargs, which at
+ // this point might hold return values).
+ unsigned LowGPR = ZFI->getLowSavedGPR();
+ unsigned HighGPR = ZFI->getHighSavedGPR();
+ unsigned StartOffset = RegSpillOffsets[LowGPR];
+ if (LowGPR) {
+ // If we saved any of %r2-%r5 as varargs, we should also be saving
+ // and restoring %r6. If we're saving %r6 or above, we should be
+ // restoring it too.
+ assert(LowGPR != HighGPR && "Should be loading %r15 and something else");
+
+ // Build an LMG instruction.
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::LMG));
+
+ // Add the explicit register operands.
+ MIB.addReg(LowGPR, RegState::Define);
+ MIB.addReg(HighGPR, RegState::Define);
+
+ // Add the address.
+ MIB.addReg(HasFP ? SystemZ::R11D : SystemZ::R15D);
+ MIB.addImm(StartOffset);
+
+    // Do a second scan, adding the remaining call-saved registers as
+    // implicitly defined by the LMG.
+ for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+ unsigned Reg = CSI[I].getReg();
+ if (Reg != LowGPR && Reg != HighGPR)
+ MIB.addReg(Reg, RegState::ImplicitDefine);
+ }
+ }
+
+ return true;
+}
+
+void SystemZFrameLowering::
+processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFFrame = MF.getFrameInfo();
+ uint64_t MaxReach = (MFFrame->estimateStackSize(MF) +
+ SystemZMC::CallFrameSize * 2);
+ if (!isUInt<12>(MaxReach)) {
+ // We may need register scavenging slots if some parts of the frame
+ // are outside the reach of an unsigned 12-bit displacement.
+ // Create 2 for the case where both addresses in an MVC are
+ // out of range.
+ RS->addScavengingFrameIndex(MFFrame->CreateStackObject(8, 8, false));
+ RS->addScavengingFrameIndex(MFFrame->CreateStackObject(8, 8, false));
+ }
+}
+
+// Emit instructions before MBBI (in MBB) to add NumBytes to Reg.
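+//
+// For example (illustrative), a NumBytes of -640 emits a single
+// "AGHI %r15, -640", while an increment outside the signed 16-bit range
+// uses AGFI, clamped to a multiple of 8 so that each partial step keeps
+// the stack 8-byte aligned, looping until NumBytes is consumed.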
+static void emitIncrement(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const DebugLoc &DL,
+ unsigned Reg, int64_t NumBytes,
+ const TargetInstrInfo *TII) {
+ while (NumBytes) {
+ unsigned Opcode;
+ int64_t ThisVal = NumBytes;
+ if (isInt<16>(NumBytes))
+ Opcode = SystemZ::AGHI;
+ else {
+ Opcode = SystemZ::AGFI;
+ // Make sure we maintain 8-byte stack alignment.
+ int64_t MinVal = -int64_t(1) << 31;
+ int64_t MaxVal = (int64_t(1) << 31) - 8;
+ if (ThisVal < MinVal)
+ ThisVal = MinVal;
+ else if (ThisVal > MaxVal)
+ ThisVal = MaxVal;
+ }
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII->get(Opcode), Reg)
+ .addReg(Reg).addImm(ThisVal);
+ // The CC implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ NumBytes -= ThisVal;
+ }
+}
+
+void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front();
+ MachineFrameInfo *MFFrame = MF.getFrameInfo();
+ const SystemZInstrInfo *ZII =
+ static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
+ SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ const std::vector<CalleeSavedInfo> &CSI = MFFrame->getCalleeSavedInfo();
+ bool HasFP = hasFP(MF);
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // The current offset of the stack pointer from the CFA.
+ int64_t SPOffsetFromCFA = -SystemZMC::CFAOffsetFromInitialSP;
+
+ if (ZFI->getLowSavedGPR()) {
+ // Skip over the GPR saves.
+ if (MBBI != MBB.end() && MBBI->getOpcode() == SystemZ::STMG)
+ ++MBBI;
+ else
+ llvm_unreachable("Couldn't skip over GPR saves");
+
+ // Add CFI for the GPR saves.
+ MCSymbol *GPRSaveLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL,
+ ZII->get(TargetOpcode::PROLOG_LABEL)).addSym(GPRSaveLabel);
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I) {
+ unsigned Reg = I->getReg();
+ if (SystemZ::GR64BitRegClass.contains(Reg)) {
+ int64_t Offset = SPOffsetFromCFA + RegSpillOffsets[Reg];
+ MMI.addFrameInst(MCCFIInstruction::createOffset(
+ GPRSaveLabel, MRI->getDwarfRegNum(Reg, true), Offset));
+ }
+ }
+ }
+
+ uint64_t StackSize = getAllocatedStackSize(MF);
+ if (StackSize) {
+ // Allocate StackSize bytes.
+ int64_t Delta = -int64_t(StackSize);
+ emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);
+
+ // Add CFI for the allocation.
+ MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
+ .addSym(AdjustSPLabel);
+ MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
+ AdjustSPLabel, SPOffsetFromCFA + Delta));
+ SPOffsetFromCFA += Delta;
+ }
+
+ if (HasFP) {
+ // Copy the base of the frame to R11.
+ BuildMI(MBB, MBBI, DL, ZII->get(SystemZ::LGR), SystemZ::R11D)
+ .addReg(SystemZ::R15D);
+
+ // Add CFI for the new frame location.
+ MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
+ .addSym(SetFPLabel);
+ unsigned HardFP = MRI->getDwarfRegNum(SystemZ::R11D, true);
+ MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(SetFPLabel, HardFP));
+
+ // Mark the FramePtr as live at the beginning of every block except
+ // the entry block. (We'll have marked R11 as live on entry when
+ // saving the GPRs.)
+ for (MachineFunction::iterator
+ I = llvm::next(MF.begin()), E = MF.end(); I != E; ++I)
+ I->addLiveIn(SystemZ::R11D);
+ }
+
+ // Skip over the FPR saves.
+ MCSymbol *FPRSaveLabel = 0;
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I) {
+ unsigned Reg = I->getReg();
+ if (SystemZ::FP64BitRegClass.contains(Reg)) {
+ if (MBBI != MBB.end() &&
+ (MBBI->getOpcode() == SystemZ::STD ||
+ MBBI->getOpcode() == SystemZ::STDY))
+ ++MBBI;
+ else
+ llvm_unreachable("Couldn't skip over FPR save");
+
+      // Add CFI for this save.
+      if (!FPRSaveLabel)
+        FPRSaveLabel = MMI.getContext().CreateTempSymbol();
+      unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+      int64_t Offset = getFrameIndexOffset(MF, I->getFrameIdx());
+      MMI.addFrameInst(MCCFIInstruction::createOffset(
+          FPRSaveLabel, DwarfReg, SPOffsetFromCFA + Offset));
+ }
+ }
+ // Complete the CFI for the FPR saves, modelling them as taking effect
+ // after the last save.
+ if (FPRSaveLabel)
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
+ .addSym(FPRSaveLabel);
+}
+
+void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const SystemZInstrInfo *ZII =
+ static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
+ SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
+
+ // Skip the return instruction.
+ assert(MBBI->isReturn() && "Can only insert epilogue into returning blocks");
+
+ uint64_t StackSize = getAllocatedStackSize(MF);
+ if (ZFI->getLowSavedGPR()) {
+ --MBBI;
+ unsigned Opcode = MBBI->getOpcode();
+ if (Opcode != SystemZ::LMG)
+ llvm_unreachable("Expected to see callee-save register restore code");
+
+ unsigned AddrOpNo = 2;
+ DebugLoc DL = MBBI->getDebugLoc();
+ uint64_t Offset = StackSize + MBBI->getOperand(AddrOpNo + 1).getImm();
+ unsigned NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
+
+ // If the offset is too large, use the largest stack-aligned offset
+ // and add the rest to the base register (the stack or frame pointer).
+ if (!NewOpcode) {
+ uint64_t NumBytes = Offset - 0x7fff8;
+ emitIncrement(MBB, MBBI, DL, MBBI->getOperand(AddrOpNo).getReg(),
+ NumBytes, ZII);
+ Offset -= NumBytes;
+ NewOpcode = ZII->getOpcodeForOffset(Opcode, Offset);
+ assert(NewOpcode && "No restore instruction available");
+ }
+
+ MBBI->setDesc(ZII->get(NewOpcode));
+ MBBI->getOperand(AddrOpNo + 1).ChangeToImmediate(Offset);
+ } else if (StackSize) {
+ DebugLoc DL = MBBI->getDebugLoc();
+ emitIncrement(MBB, MBBI, DL, SystemZ::R15D, StackSize, ZII);
+ }
+}
+
+bool SystemZFrameLowering::hasFP(const MachineFunction &MF) const {
+ return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ MF.getFrameInfo()->hasVarSizedObjects() ||
+ MF.getInfo<SystemZMachineFunctionInfo>()->getManipulatesSP());
+}
+
+int SystemZFrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ const MachineFrameInfo *MFFrame = MF.getFrameInfo();
+
+ // Start with the offset of FI from the top of the caller-allocated frame
+ // (i.e. the top of the 160 bytes allocated by the caller). This initial
+ // offset is therefore negative.
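+  //
+  // For example (illustrative): with an object offset of -168, a local
+  // area offset of -160 and 200 bytes of allocated stack, the result is
+  // -168 + 160 + 200 = 192 bytes above the new stack pointer.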
+ int64_t Offset = (MFFrame->getObjectOffset(FI) +
+ MFFrame->getOffsetAdjustment());
+
+ // Make the offset relative to the incoming stack pointer.
+ Offset -= getOffsetOfLocalArea();
+
+ // Make the offset relative to the bottom of the frame.
+ Offset += getAllocatedStackSize(MF);
+
+ return Offset;
+}
+
+uint64_t SystemZFrameLowering::
+getAllocatedStackSize(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFFrame = MF.getFrameInfo();
+
+ // Start with the size of the local variables and spill slots.
+ uint64_t StackSize = MFFrame->getStackSize();
+
+ // We need to allocate the ABI-defined 160-byte base area whenever
+ // we allocate stack space for our own use and whenever we call another
+ // function.
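+  //
+  // For example (illustrative), a function with 40 bytes of locals
+  // allocates 40 + 160 = 200 bytes, while a leaf function with no locals,
+  // calls or variable-sized objects allocates nothing.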
+ if (StackSize || MFFrame->hasVarSizedObjects() || MFFrame->hasCalls())
+ StackSize += SystemZMC::CallFrameSize;
+
+ return StackSize;
+}
+
+bool
+SystemZFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ // The ABI requires us to allocate 160 bytes of stack space for the callee,
+ // with any outgoing stack arguments being placed above that. It seems
+ // better to make that area a permanent feature of the frame even if
+ // we're using a frame pointer.
+ return true;
+}
+
+void SystemZFrameLowering::
+eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ switch (MI->getOpcode()) {
+ case SystemZ::ADJCALLSTACKDOWN:
+ case SystemZ::ADJCALLSTACKUP:
+ assert(hasReservedCallFrame(MF) &&
+ "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
+ MBB.erase(MI);
+ break;
+
+ default:
+ llvm_unreachable("Unexpected call frame instruction");
+ }
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
new file mode 100644
index 000000000000..9b0a1d5f224c
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -0,0 +1,78 @@
+//===-- SystemZFrameLowering.h - Frame lowering for SystemZ -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZFRAMELOWERING_H
+#define SYSTEMZFRAMELOWERING_H
+
+#include "SystemZSubtarget.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+namespace llvm {
+class SystemZTargetMachine;
+class SystemZSubtarget;
+
+class SystemZFrameLowering : public TargetFrameLowering {
+ IndexedMap<unsigned> RegSpillOffsets;
+
+protected:
+ const SystemZTargetMachine &TM;
+ const SystemZSubtarget &STI;
+
+public:
+ SystemZFrameLowering(const SystemZTargetMachine &tm,
+ const SystemZSubtarget &sti);
+
+ // Override TargetFrameLowering.
+ virtual bool isFPCloseToIncomingSP() const LLVM_OVERRIDE { return false; }
+ virtual const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const
+ LLVM_OVERRIDE;
+ virtual void
+ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const LLVM_OVERRIDE;
+ virtual bool
+ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const
+ LLVM_OVERRIDE;
+ virtual bool
+ restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MBBI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const
+ LLVM_OVERRIDE;
+  virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+                                                   RegScavenger *RS) const
+    LLVM_OVERRIDE;
+ virtual void emitPrologue(MachineFunction &MF) const LLVM_OVERRIDE;
+ virtual void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const LLVM_OVERRIDE;
+ virtual bool hasFP(const MachineFunction &MF) const LLVM_OVERRIDE;
+ virtual int getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const LLVM_OVERRIDE;
+ virtual bool hasReservedCallFrame(const MachineFunction &MF) const
+ LLVM_OVERRIDE;
+ virtual void
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const
+ LLVM_OVERRIDE;
+
+ // Return the number of bytes in the callee-allocated part of the frame.
+ uint64_t getAllocatedStackSize(const MachineFunction &MF) const;
+
+ // Return the byte offset from the incoming stack pointer of Reg's
+ // ABI-defined save slot. Return 0 if no slot is defined for Reg.
+ unsigned getRegSpillOffset(unsigned Reg) const {
+ return RegSpillOffsets[Reg];
+ }
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
new file mode 100644
index 000000000000..f4a27733ce0e
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -0,0 +1,1148 @@
+//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the SystemZ target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZTargetMachine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+// Used to build addressing modes.
+struct SystemZAddressingMode {
+ // The shape of the address.
+ enum AddrForm {
+ // base+displacement
+ FormBD,
+
+ // base+displacement+index for load and store operands
+ FormBDXNormal,
+
+ // base+displacement+index for load address operands
+ FormBDXLA,
+
+ // base+displacement+index+ADJDYNALLOC
+ FormBDXDynAlloc
+ };
+ AddrForm Form;
+
+ // The type of displacement. The enum names here correspond directly
+ // to the definitions in SystemZOperand.td. We could split them into
+ // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
+ enum DispRange {
+ Disp12Only,
+ Disp12Pair,
+ Disp20Only,
+ Disp20Only128,
+ Disp20Pair
+ };
+ DispRange DR;
+
+ // The parts of the address. The address is equivalent to:
+ //
+ // Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
+ SDValue Base;
+ int64_t Disp;
+ SDValue Index;
+ bool IncludesDynAlloc;
+
+ SystemZAddressingMode(AddrForm form, DispRange dr)
+ : Form(form), DR(dr), Base(), Disp(0), Index(),
+ IncludesDynAlloc(false) {}
+
+ // True if the address can have an index register.
+ bool hasIndexField() { return Form != FormBD; }
+
+ // True if the address can (and must) include ADJDYNALLOC.
+ bool isDynAlloc() { return Form == FormBDXDynAlloc; }
+
+ void dump() {
+ errs() << "SystemZAddressingMode " << this << '\n';
+
+ errs() << " Base ";
+ if (Base.getNode() != 0)
+ Base.getNode()->dump();
+ else
+ errs() << "null\n";
+
+ if (hasIndexField()) {
+ errs() << " Index ";
+ if (Index.getNode() != 0)
+ Index.getNode()->dump();
+ else
+ errs() << "null\n";
+ }
+
+ errs() << " Disp " << Disp;
+ if (IncludesDynAlloc)
+ errs() << " + ADJDYNALLOC";
+ errs() << '\n';
+ }
+};
+
+// Return a mask with Count low bits set.
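+// For example, allOnes(12) == 0xfff and allOnes(64) == ~0ULL; the
+// two-step shift below avoids the undefined behavior of shifting a
+// 64-bit value by 64 when Count is 64.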
+static uint64_t allOnes(unsigned int Count) {
+ return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
+}
+
+// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
+// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
+// Rotate (I5). The combined operand value is effectively:
+//
+// (or (rotl Input, Rotate), ~Mask)
+//
+// for RNSBG and:
+//
+// (and (rotl Input, Rotate), Mask)
+//
+// otherwise. The output value has BitSize bits, although Input may be
+// narrower (in which case the upper bits are don't care).
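+//
+// For example (illustrative), extracting the byte whose most significant
+// bit is bit 16 of the input (MSB-0 numbering) into the low byte of the
+// result corresponds to Rotate = 24, Start = 56, End = 63, with the
+// remaining bits cleared via the "zero" flag in RISBG.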
+struct RxSBGOperands {
+ RxSBGOperands(unsigned Op, SDValue N)
+ : Opcode(Op), BitSize(N.getValueType().getSizeInBits()),
+ Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
+ Rotate(0) {}
+
+ unsigned Opcode;
+ unsigned BitSize;
+ uint64_t Mask;
+ SDValue Input;
+ unsigned Start;
+ unsigned End;
+ unsigned Rotate;
+};
+
+class SystemZDAGToDAGISel : public SelectionDAGISel {
+ const SystemZTargetLowering &Lowering;
+ const SystemZSubtarget &Subtarget;
+
+ // Used by SystemZOperands.td to create integer constants.
+ inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
+ return CurDAG->getTargetConstant(Imm, Node->getValueType(0));
+ }
+
+ const SystemZTargetMachine &getTargetMachine() const {
+ return static_cast<const SystemZTargetMachine &>(TM);
+ }
+
+ const SystemZInstrInfo *getInstrInfo() const {
+ return getTargetMachine().getInstrInfo();
+ }
+
+ // Try to fold more of the base or index of AM into AM, where IsBase
+ // selects between the base and index.
+ bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;
+
+ // Try to describe N in AM, returning true on success.
+ bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;
+
+ // Extract individual target operands from matched address AM.
+ void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
+ SDValue &Base, SDValue &Disp) const;
+ void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
+ SDValue &Base, SDValue &Disp, SDValue &Index) const;
+
+ // Try to match Addr as a FormBD address with displacement type DR.
+ // Return true on success, storing the base and displacement in
+ // Base and Disp respectively.
+ bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
+ SDValue &Base, SDValue &Disp) const;
+
+ // Try to match Addr as a FormBDX address with displacement type DR.
+ // Return true on success and if the result had no index. Store the
+ // base and displacement in Base and Disp respectively.
+ bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
+ SDValue &Base, SDValue &Disp) const;
+
+ // Try to match Addr as a FormBDX* address of form Form with
+ // displacement type DR. Return true on success, storing the base,
+ // displacement and index in Base, Disp and Index respectively.
+ bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
+ SystemZAddressingMode::DispRange DR, SDValue Addr,
+ SDValue &Base, SDValue &Disp, SDValue &Index) const;
+
+ // PC-relative address matching routines used by SystemZOperands.td.
+ bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
+ if (SystemZISD::isPCREL(Addr.getOpcode())) {
+ Target = Addr.getOperand(0);
+ return true;
+ }
+ return false;
+ }
+
+ // BD matching routines used by SystemZOperands.td.
+ bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
+ }
+ bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
+ }
+ bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
+ }
+ bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
+ }
+
+ // MVI matching routines used by SystemZOperands.td.
+ bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
+ }
+ bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
+ return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
+ }
+
+ // BDX matching routines used by SystemZOperands.td.
+ bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
+ SystemZAddressingMode::Disp12Only,
+ Addr, Base, Disp, Index);
+ }
+ bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
+ SystemZAddressingMode::Disp12Pair,
+ Addr, Base, Disp, Index);
+ }
+ bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
+ SystemZAddressingMode::Disp12Only,
+ Addr, Base, Disp, Index);
+ }
+ bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
+ SystemZAddressingMode::Disp20Only,
+ Addr, Base, Disp, Index);
+ }
+ bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
+ SystemZAddressingMode::Disp20Only128,
+ Addr, Base, Disp, Index);
+ }
+ bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
+ SystemZAddressingMode::Disp20Pair,
+ Addr, Base, Disp, Index);
+ }
+ bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
+ SystemZAddressingMode::Disp12Pair,
+ Addr, Base, Disp, Index);
+ }
+ bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
+ SDValue &Index) const {
+ return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
+ SystemZAddressingMode::Disp20Pair,
+ Addr, Base, Disp, Index);
+ }
+
+ // Check whether (or Op (and X InsertMask)) is effectively an insertion
+ // of X into bits InsertMask of some Y != Op. Return true if so and
+ // set Op to that Y.
+ bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;
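+  //
+  // For example (illustrative), with a 32-bit InsertMask of 0xff, an Op
+  // of (and Y, 0xffffff00) qualifies: the two masks do not overlap and
+  // together cover every bit, so Op can be replaced by Y.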
+
+ // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
+ // Return true on success.
+ bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;
+
+ // Try to fold some of RxSBG.Input into other fields of RxSBG.
+ // Return true on success.
+ bool expandRxSBG(RxSBGOperands &RxSBG) const;
+
+ // Return an undefined value of type VT.
+ SDValue getUNDEF(SDLoc DL, EVT VT) const;
+
+ // Convert N to VT, if it isn't already.
+ SDValue convertTo(SDLoc DL, EVT VT, SDValue N) const;
+
+ // Try to implement AND or shift node N using RISBG with the zero flag set.
+ // Return the selected node on success, otherwise return null.
+ SDNode *tryRISBGZero(SDNode *N);
+
+ // Try to use RISBG or Opcode to implement OR or XOR node N.
+ // Return the selected node on success, otherwise return null.
+ SDNode *tryRxSBG(SDNode *N, unsigned Opcode);
+
+ // If Op0 is null, then Node is a constant that can be loaded using:
+ //
+ // (Opcode UpperVal LowerVal)
+ //
+ // If Op0 is nonnull, then Node can be implemented using:
+ //
+ // (Opcode (Opcode Op0 UpperVal) LowerVal)
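+  //
+  // For example (illustrative), a 64-bit constant with both halves
+  // nonzero can be materialized by loading the low 32 bits and then
+  // inserting the high 32 bits, rather than using a constant-pool load.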
+ SDNode *splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
+ uint64_t UpperVal, uint64_t LowerVal);
+
+ // Return true if Load and Store are loads and stores of the same size
+ // and are guaranteed not to overlap. Such operations can be implemented
+ // using block (SS-format) instructions.
+ //
+ // Partial overlap would lead to incorrect code, since the block operations
+ // are logically bytewise, even though they have a fast path for the
+ // non-overlapping case. We also need to avoid full overlap (i.e. two
+ // addresses that might be equal at run time) because although that case
+ // would be handled correctly, it might be implemented by millicode.
+ bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;
+
+ // N is a (store (load Y), X) pattern. Return true if it can use an MVC
+ // from Y to X.
+ bool storeLoadCanUseMVC(SDNode *N) const;
+
+ // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true
+ // if A[1 - I] == X and if N can use a block operation like NC from A[I]
+ // to X.
+ bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;
+
+public:
+ SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
+ : SelectionDAGISel(TM, OptLevel),
+ Lowering(*TM.getTargetLowering()),
+ Subtarget(*TM.getSubtargetImpl()) { }
+
+ // Override MachineFunctionPass.
+ virtual const char *getPassName() const LLVM_OVERRIDE {
+ return "SystemZ DAG->DAG Pattern Instruction Selection";
+ }
+
+ // Override SelectionDAGISel.
+ virtual SDNode *Select(SDNode *Node) LLVM_OVERRIDE;
+ virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps)
+ LLVM_OVERRIDE;
+
+ // Include the pieces autogenerated from the target description.
+ #include "SystemZGenDAGISel.inc"
+};
+} // end anonymous namespace
+
+FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new SystemZDAGToDAGISel(TM, OptLevel);
+}
+
+// Return true if Val should be selected as a displacement for an address
+// with range DR. Here we're interested in the range of both the instruction
+// described by DR and of any pairing instruction.
+static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
+ switch (DR) {
+ case SystemZAddressingMode::Disp12Only:
+ return isUInt<12>(Val);
+
+ case SystemZAddressingMode::Disp12Pair:
+ case SystemZAddressingMode::Disp20Only:
+ case SystemZAddressingMode::Disp20Pair:
+ return isInt<20>(Val);
+
+ case SystemZAddressingMode::Disp20Only128:
+ return isInt<20>(Val) && isInt<20>(Val + 8);
+ }
+ llvm_unreachable("Unhandled displacement range");
+}
+
+// Change the base or index in AM to Value, where IsBase selects
+// between the base and index.
+static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
+ SDValue Value) {
+ if (IsBase)
+ AM.Base = Value;
+ else
+ AM.Index = Value;
+}
+
+// The base or index of AM is equivalent to Value + ADJDYNALLOC,
+// where IsBase selects between the base and index. Try to fold the
+// ADJDYNALLOC into AM.
+static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
+ SDValue Value) {
+ if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
+ changeComponent(AM, IsBase, Value);
+ AM.IncludesDynAlloc = true;
+ return true;
+ }
+ return false;
+}
+
+// The base of AM is equivalent to Base + Index. Try to use Index as
+// the index register.
+static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
+ SDValue Index) {
+ if (AM.hasIndexField() && !AM.Index.getNode()) {
+ AM.Base = Base;
+ AM.Index = Index;
+ return true;
+ }
+ return false;
+}
+
+// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
+// between the base and index. Try to fold Op1 into AM's displacement.
+static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
+ SDValue Op0, uint64_t Op1) {
+ // First try adjusting the displacement.
+ int64_t TestDisp = AM.Disp + Op1;
+ if (selectDisp(AM.DR, TestDisp)) {
+ changeComponent(AM, IsBase, Op0);
+ AM.Disp = TestDisp;
+ return true;
+ }
+
+ // We could consider forcing the displacement into a register and
+ // using it as an index, but it would need to be carefully tuned.
+ return false;
+}
+
+bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
+ bool IsBase) const {
+ SDValue N = IsBase ? AM.Base : AM.Index;
+ unsigned Opcode = N.getOpcode();
+ if (Opcode == ISD::TRUNCATE) {
+ N = N.getOperand(0);
+ Opcode = N.getOpcode();
+ }
+ if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
+ SDValue Op0 = N.getOperand(0);
+ SDValue Op1 = N.getOperand(1);
+
+ unsigned Op0Code = Op0->getOpcode();
+ unsigned Op1Code = Op1->getOpcode();
+
+ if (Op0Code == SystemZISD::ADJDYNALLOC)
+ return expandAdjDynAlloc(AM, IsBase, Op1);
+ if (Op1Code == SystemZISD::ADJDYNALLOC)
+ return expandAdjDynAlloc(AM, IsBase, Op0);
+
+ if (Op0Code == ISD::Constant)
+ return expandDisp(AM, IsBase, Op1,
+ cast<ConstantSDNode>(Op0)->getSExtValue());
+ if (Op1Code == ISD::Constant)
+ return expandDisp(AM, IsBase, Op0,
+ cast<ConstantSDNode>(Op1)->getSExtValue());
+
+ if (IsBase && expandIndex(AM, Op0, Op1))
+ return true;
+ }
+ if (Opcode == SystemZISD::PCREL_OFFSET) {
+ SDValue Full = N.getOperand(0);
+ SDValue Base = N.getOperand(1);
+ SDValue Anchor = Base.getOperand(0);
+ uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
+ cast<GlobalAddressSDNode>(Anchor)->getOffset());
+ return expandDisp(AM, IsBase, Base, Offset);
+ }
+ return false;
+}
+
+// Return true if an instruction with displacement range DR should be
+// used for displacement value Val. selectDisp(DR, Val) must already hold.
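+//
+// For example, L has a 12-bit unsigned displacement and pairs with LY,
+// which has a 20-bit signed one: a displacement of 4000 should use L
+// (Disp12Pair), while 6000 or -8 must use LY (Disp20Pair).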
+static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
+ assert(selectDisp(DR, Val) && "Invalid displacement");
+ switch (DR) {
+ case SystemZAddressingMode::Disp12Only:
+ case SystemZAddressingMode::Disp20Only:
+ case SystemZAddressingMode::Disp20Only128:
+ return true;
+
+ case SystemZAddressingMode::Disp12Pair:
+ // Use the other instruction if the displacement is too large.
+ return isUInt<12>(Val);
+
+ case SystemZAddressingMode::Disp20Pair:
+ // Use the other instruction if the displacement is small enough.
+ return !isUInt<12>(Val);
+ }
+ llvm_unreachable("Unhandled displacement range");
+}
+
+// Return true if Base + Disp + Index should be performed by LA(Y).
+static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
+ // Don't use LA(Y) for constants.
+ if (!Base)
+ return false;
+
+ // Always use LA(Y) for frame addresses, since we know that the destination
+ // register is almost always (perhaps always) going to be different from
+ // the frame register.
+ if (Base->getOpcode() == ISD::FrameIndex)
+ return true;
+
+ if (Disp) {
+ // Always use LA(Y) if there is a base, displacement and index.
+ if (Index)
+ return true;
+
+ // Always use LA if the displacement is small enough. It should always
+ // be no worse than AGHI (and better if it avoids a move).
+ if (isUInt<12>(Disp))
+ return true;
+
+ // For similar reasons, always use LAY if the constant is too big for AGHI.
+ // LAY should be no worse than AGFI.
+ if (!isInt<16>(Disp))
+ return true;
+ } else {
+ // Don't use LA for plain registers.
+ if (!Index)
+ return false;
+
+ // Don't use LA for plain addition if the index operand is only used
+ // once. It should be a natural two-operand addition in that case.
+ if (Index->hasOneUse())
+ return false;
+
+ // Prefer addition if the second operation is sign-extended, in the
+ // hope of using AGF.
+ unsigned IndexOpcode = Index->getOpcode();
+ if (IndexOpcode == ISD::SIGN_EXTEND ||
+ IndexOpcode == ISD::SIGN_EXTEND_INREG)
+ return false;
+ }
+
+ // Don't use LA for two-operand addition if either operand is only
+ // used once. The addition instructions are better in that case.
+ if (Base->hasOneUse())
+ return false;
+
+ return true;
+}
+
+// Return true if Addr is suitable for AM, updating AM if so.
+bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
+ SystemZAddressingMode &AM) const {
+ // Start out assuming that the address will need to be loaded separately,
+ // then try to extend it as much as we can.
+ AM.Base = Addr;
+
+ // First try treating the address as a constant.
+ if (Addr.getOpcode() == ISD::Constant &&
+ expandDisp(AM, true, SDValue(),
+ cast<ConstantSDNode>(Addr)->getSExtValue()))
+ ;
+ else
+ // Otherwise try expanding each component.
+ while (expandAddress(AM, true) ||
+ (AM.Index.getNode() && expandAddress(AM, false)))
+ continue;
+
+ // Reject cases where it isn't profitable to use LA(Y).
+ if (AM.Form == SystemZAddressingMode::FormBDXLA &&
+ !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
+ return false;
+
+ // Reject cases where the other instruction in a pair should be used.
+ if (!isValidDisp(AM.DR, AM.Disp))
+ return false;
+
+ // Make sure that ADJDYNALLOC is included where necessary.
+ if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
+ return false;
+
+ DEBUG(AM.dump());
+ return true;
+}
+
+// Insert a node into the DAG at least before Pos. This will reposition
+// the node as needed, and will assign it a node ID that is <= Pos's ID.
+// Note that this does *not* preserve the uniqueness of node IDs!
+// The selection DAG must no longer depend on their uniqueness when this
+// function is used.
+static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
+ if (N.getNode()->getNodeId() == -1 ||
+ N.getNode()->getNodeId() > Pos->getNodeId()) {
+ DAG->RepositionNode(Pos, N.getNode());
+ N.getNode()->setNodeId(Pos->getNodeId());
+ }
+}
+
+void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
+ EVT VT, SDValue &Base,
+ SDValue &Disp) const {
+ Base = AM.Base;
+ if (!Base.getNode())
+ // Register 0 means "no base". This is mostly useful for shifts.
+ Base = CurDAG->getRegister(0, VT);
+ else if (Base.getOpcode() == ISD::FrameIndex) {
+ // Lower a FrameIndex to a TargetFrameIndex.
+ int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
+ Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
+ } else if (Base.getValueType() != VT) {
+ // Truncate values from i64 to i32, for shifts.
+ assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
+ "Unexpected truncation");
+ SDLoc DL(Base);
+ SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
+ insertDAGNode(CurDAG, Base.getNode(), Trunc);
+ Base = Trunc;
+ }
+
+ // Lower the displacement to a TargetConstant.
+ Disp = CurDAG->getTargetConstant(AM.Disp, VT);
+}
+
+void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
+ EVT VT, SDValue &Base,
+ SDValue &Disp,
+ SDValue &Index) const {
+ getAddressOperands(AM, VT, Base, Disp);
+
+ Index = AM.Index;
+ if (!Index.getNode())
+ // Register 0 means "no index".
+ Index = CurDAG->getRegister(0, VT);
+}
+
+bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
+ SDValue Addr, SDValue &Base,
+ SDValue &Disp) const {
+ SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
+ if (!selectAddress(Addr, AM))
+ return false;
+
+ getAddressOperands(AM, Addr.getValueType(), Base, Disp);
+ return true;
+}
+
+bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
+ SDValue Addr, SDValue &Base,
+ SDValue &Disp) const {
+ SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
+ if (!selectAddress(Addr, AM) || AM.Index.getNode())
+ return false;
+
+ getAddressOperands(AM, Addr.getValueType(), Base, Disp);
+ return true;
+}
+
+bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
+ SystemZAddressingMode::DispRange DR,
+ SDValue Addr, SDValue &Base,
+ SDValue &Disp, SDValue &Index) const {
+ SystemZAddressingMode AM(Form, DR);
+ if (!selectAddress(Addr, AM))
+ return false;
+
+ getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
+ return true;
+}
+
+bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
+ uint64_t InsertMask) const {
+ // We're only interested in cases where the insertion is into some operand
+ // of Op, rather than into Op itself. The only useful case is an AND.
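+ // For example, if InsertMask is 0xff and Op is (and X, 0xffffff00)
+ // in a 32-bit value, each bit is covered by exactly one operand and
+ // the OR simply inserts the low byte, so Op can be replaced by X.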
+ if (Op.getOpcode() != ISD::AND)
+ return false;
+
+ // We need a constant mask.
+ ConstantSDNode *MaskNode =
+ dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+ if (!MaskNode)
+ return false;
+
+ // It's not an insertion of Op.getOperand(0) if the two masks overlap.
+ uint64_t AndMask = MaskNode->getZExtValue();
+ if (InsertMask & AndMask)
+ return false;
+
+ // It's only an insertion if all bits are covered or are known to be zero.
+ // The inner check covers all cases but is more expensive.
+ uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
+ if (Used != (AndMask | InsertMask)) {
+ APInt KnownZero, KnownOne;
+ CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne);
+ if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
+ return false;
+ }
+
+ Op = Op.getOperand(0);
+ return true;
+}
+
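+// Restrict the significant bits of RxSBG to Mask, rotating Mask in the
+// same way as RxSBG.Input is rotated. Update RxSBG.Mask and return true
+// if the combined mask is still one that the RxSBG instructions can
+// select (a contiguous run of ones, possibly wrapping around).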
+bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
+ uint64_t Mask) const {
+ const SystemZInstrInfo *TII = getInstrInfo();
+ if (RxSBG.Rotate != 0)
+ Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
+ Mask &= RxSBG.Mask;
+ if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
+ RxSBG.Mask = Mask;
+ return true;
+ }
+ return false;
+}
+
+// Return true if any bits of (RxSBG.Input & Mask) are significant.
+static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
+ // Rotate the mask in the same way as RxSBG.Input is rotated.
+ if (RxSBG.Rotate != 0)
+ Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
+ return (Mask & RxSBG.Mask) != 0;
+}
+
+bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
+ SDValue N = RxSBG.Input;
+ unsigned Opcode = N.getOpcode();
+ switch (Opcode) {
+ case ISD::AND: {
+ if (RxSBG.Opcode == SystemZ::RNSBG)
+ return false;
+
+ ConstantSDNode *MaskNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!MaskNode)
+ return false;
+
+ SDValue Input = N.getOperand(0);
+ uint64_t Mask = MaskNode->getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask)) {
+ // If some bits of Input are already known zeros, those bits will have
+ // been removed from the mask. See if adding them back in makes the
+ // mask suitable.
+ APInt KnownZero, KnownOne;
+ CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ Mask |= KnownZero.getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask))
+ return false;
+ }
+ RxSBG.Input = Input;
+ return true;
+ }
+
+ case ISD::OR: {
+ if (RxSBG.Opcode != SystemZ::RNSBG)
+ return false;
+
+ ConstantSDNode *MaskNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!MaskNode)
+ return false;
+
+ SDValue Input = N.getOperand(0);
+ uint64_t Mask = ~MaskNode->getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask)) {
+ // If some bits of Input are already known ones, those bits will have
+ // been removed from the mask. See if adding them back in makes the
+ // mask suitable.
+ APInt KnownZero, KnownOne;
+ CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ Mask &= ~KnownOne.getZExtValue();
+ if (!refineRxSBGMask(RxSBG, Mask))
+ return false;
+ }
+ RxSBG.Input = Input;
+ return true;
+ }
+
+ case ISD::ROTL: {
+ // Any 64-bit rotate left can be merged into the RxSBG.
+ if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
+ return false;
+ ConstantSDNode *CountNode
+ = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!CountNode)
+ return false;
+
+ RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
+ case ISD::SIGN_EXTEND:
+ case ISD::ZERO_EXTEND:
+ case ISD::ANY_EXTEND: {
+ // Check that the extension bits are don't-care (i.e. are masked out
+ // by the final mask).
+ unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+ if (maskMatters(RxSBG, allOnes(RxSBG.BitSize) - allOnes(InnerBitSize)))
+ return false;
+
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
+ case ISD::SHL: {
+ ConstantSDNode *CountNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!CountNode)
+ return false;
+
+ uint64_t Count = CountNode->getZExtValue();
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ if (Count < 1 || Count >= BitSize)
+ return false;
+
+ if (RxSBG.Opcode == SystemZ::RNSBG) {
+ // Treat (shl X, count) as (rotl X, size-count) as long as the bottom
+ // count bits from RxSBG.Input are ignored.
+ if (maskMatters(RxSBG, allOnes(Count)))
+ return false;
+ } else {
+ // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
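+ // For example, a 64-bit shift left by 8 needs the mask
+ // allOnes(56) << 8 == 0xffffffffffffff00.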
+ if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
+ return false;
+ }
+
+ RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+
+ case ISD::SRL:
+ case ISD::SRA: {
+ ConstantSDNode *CountNode =
+ dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ if (!CountNode)
+ return false;
+
+ uint64_t Count = CountNode->getZExtValue();
+ unsigned BitSize = N.getValueType().getSizeInBits();
+ if (Count < 1 || Count >= BitSize)
+ return false;
+
+ if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
+ // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
+ // count bits from RxSBG.Input are ignored.
+ if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
+ return false;
+ } else {
+ // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
+ // which is similar to the SHL case above.
+ if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
+ return false;
+ }
+
+ RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+ default:
+ return false;
+ }
+}
+
+SDValue SystemZDAGToDAGISel::getUNDEF(SDLoc DL, EVT VT) const {
+ SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
+ return SDValue(N, 0);
+}
+
+SDValue SystemZDAGToDAGISel::convertTo(SDLoc DL, EVT VT, SDValue N) const {
+ if (N.getValueType() == MVT::i32 && VT == MVT::i64)
+ return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
+ DL, VT, getUNDEF(DL, MVT::i64), N);
+ if (N.getValueType() == MVT::i64 && VT == MVT::i32)
+ return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
+ assert(N.getValueType() == VT && "Unexpected value types");
+ return N;
+}
+
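+// Try to implement N, a shift-and-mask operation, as a single RISBG
+// that also zeroes the unselected bits. For example,
+// (and (srl X, 8), 0xff) can become RISBG with Start = 56, End = 63
+// and Rotate = 56.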
+SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
+ EVT VT = N->getValueType(0);
+ RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
+ unsigned Count = 0;
+ while (expandRxSBG(RISBG))
+ if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
+ Count += 1;
+ if (Count == 0)
+ return 0;
+ if (Count == 1) {
+ // Prefer to use normal shift instructions over RISBG, since they can handle
+ // all cases and are sometimes shorter.
+ if (N->getOpcode() != ISD::AND)
+ return 0;
+
+ // Prefer register extensions like LLC over RISBG. Also prefer to start
+ // out with normal ANDs if one instruction would be enough. We can convert
+ // these ANDs into an RISBG later if a three-address instruction is useful.
+ if (VT == MVT::i32 ||
+ RISBG.Mask == 0xff ||
+ RISBG.Mask == 0xffff ||
+ SystemZ::isImmLF(~RISBG.Mask) ||
+ SystemZ::isImmHF(~RISBG.Mask)) {
+ // Force the new mask into the DAG, since it may include known-one bits.
+ ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+ if (MaskN->getZExtValue() != RISBG.Mask) {
+ SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
+ N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
+ return SelectCode(N);
+ }
+ return 0;
+ }
+ }
+
+ unsigned Opcode = SystemZ::RISBG;
+ EVT OpcodeVT = MVT::i64;
+ if (VT == MVT::i32 && Subtarget.hasHighWord()) {
+ Opcode = SystemZ::RISBMux;
+ OpcodeVT = MVT::i32;
+ RISBG.Start &= 31;
+ RISBG.End &= 31;
+ }
+ SDValue Ops[5] = {
+ getUNDEF(SDLoc(N), OpcodeVT),
+ convertTo(SDLoc(N), OpcodeVT, RISBG.Input),
+ CurDAG->getTargetConstant(RISBG.Start, MVT::i32),
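+ // 128 sets the I4 "zero remaining bits" flag, so the unselected bits
+ // become zeros rather than being taken from the first operand.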
+ CurDAG->getTargetConstant(RISBG.End | 128, MVT::i32),
+ CurDAG->getTargetConstant(RISBG.Rotate, MVT::i32)
+ };
+ N = CurDAG->getMachineNode(Opcode, SDLoc(N), OpcodeVT, Ops);
+ return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+}
+
+SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
+ // Try treating each operand of N as the second operand of the RxSBG
+ // and see which goes deepest.
+ RxSBGOperands RxSBG[] = {
+ RxSBGOperands(Opcode, N->getOperand(0)),
+ RxSBGOperands(Opcode, N->getOperand(1))
+ };
+ unsigned Count[] = { 0, 0 };
+ for (unsigned I = 0; I < 2; ++I)
+ while (expandRxSBG(RxSBG[I]))
+ if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND)
+ Count[I] += 1;
+
+ // Do nothing if neither operand is suitable.
+ if (Count[0] == 0 && Count[1] == 0)
+ return 0;
+
+ // Pick the deepest second operand.
+ unsigned I = Count[0] > Count[1] ? 0 : 1;
+ SDValue Op0 = N->getOperand(I ^ 1);
+
+ // Prefer IC for character insertions from memory.
+ if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
+ if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+ if (Load->getMemoryVT() == MVT::i8)
+ return 0;
+
+ // See whether we can avoid an AND in the first operand by converting
+ // ROSBG to RISBG.
+ if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask))
+ Opcode = SystemZ::RISBG;
+
+ EVT VT = N->getValueType(0);
+ SDValue Ops[5] = {
+ convertTo(SDLoc(N), MVT::i64, Op0),
+ convertTo(SDLoc(N), MVT::i64, RxSBG[I].Input),
+ CurDAG->getTargetConstant(RxSBG[I].Start, MVT::i32),
+ CurDAG->getTargetConstant(RxSBG[I].End, MVT::i32),
+ CurDAG->getTargetConstant(RxSBG[I].Rotate, MVT::i32)
+ };
+ N = CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i64, Ops);
+ return convertTo(SDLoc(N), VT, SDValue(N, 0)).getNode();
+}
+
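+// Split a 64-bit immediate into two 32-bit halves, generating
+// Opcode(Op0, UpperVal) followed by Opcode of that result with LowerVal.
+// For example, with ISD::OR, 0x123456789abcdef0 becomes
+// (or (or Op0, 0x1234567800000000), 0x9abcdef0).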
+SDNode *SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
+ SDValue Op0, uint64_t UpperVal,
+ uint64_t LowerVal) {
+ EVT VT = Node->getValueType(0);
+ SDLoc DL(Node);
+ SDValue Upper = CurDAG->getConstant(UpperVal, VT);
+ if (Op0.getNode())
+ Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);
+ Upper = SDValue(Select(Upper.getNode()), 0);
+
+ SDValue Lower = CurDAG->getConstant(LowerVal, VT);
+ SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);
+ return Or.getNode();
+}
+
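+// Return true if a block operation such as MVC can safely be used to
+// implement Store from Load: the accesses must have the same size, be
+// non-volatile, and be known not to overlap.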
+bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
+ LoadSDNode *Load) const {
+ // Check that the two memory operands have the same size.
+ if (Load->getMemoryVT() != Store->getMemoryVT())
+ return false;
+
+ // Volatility stops an access from being decomposed.
+ if (Load->isVolatile() || Store->isVolatile())
+ return false;
+
+ // There's no chance of overlap if the load is invariant.
+ if (Load->isInvariant())
+ return true;
+
+ // Otherwise we need to check whether there's an alias.
+ const Value *V1 = Load->getSrcValue();
+ const Value *V2 = Store->getSrcValue();
+ if (!V1 || !V2)
+ return false;
+
+ // Reject equality.
+ uint64_t Size = Load->getMemoryVT().getStoreSize();
+ int64_t End1 = Load->getSrcValueOffset() + Size;
+ int64_t End2 = Store->getSrcValueOffset() + Size;
+ if (V1 == V2 && End1 == End2)
+ return false;
+
+ return !AA->alias(AliasAnalysis::Location(V1, End1, Load->getTBAAInfo()),
+ AliasAnalysis::Location(V2, End2, Store->getTBAAInfo()));
+}
+
+bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
+ StoreSDNode *Store = cast<StoreSDNode>(N);
+ LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+
+ // Prefer not to use MVC if either address can use ... RELATIVE LONG
+ // instructions.
+ uint64_t Size = Load->getMemoryVT().getStoreSize();
+ if (Size > 1 && Size <= 8) {
+ // Prefer LHRL, LRL and LGRL.
+ if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
+ return false;
+ // Prefer STHRL, STRL and STGRL.
+ if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
+ return false;
+ }
+
+ return canUseBlockOperation(Store, Load);
+}
+
+bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
+ unsigned I) const {
+ StoreSDNode *StoreA = cast<StoreSDNode>(N);
+ LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+ LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+ return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
+}
+
+SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
+ // Dump information about the Node being selected
+ DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");
+
+ // If we have a custom node, we already have selected!
+ if (Node->isMachineOpcode()) {
+ DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ Node->setNodeId(-1);
+ return 0;
+ }
+
+ unsigned Opcode = Node->getOpcode();
+ SDNode *ResNode = 0;
+ switch (Opcode) {
+ case ISD::OR:
+ if (Node->getOperand(1).getOpcode() != ISD::Constant)
+ ResNode = tryRxSBG(Node, SystemZ::ROSBG);
+ goto or_xor;
+
+ case ISD::XOR:
+ if (Node->getOperand(1).getOpcode() != ISD::Constant)
+ ResNode = tryRxSBG(Node, SystemZ::RXSBG);
+ // Fall through.
+ or_xor:
+ // If this is a 64-bit operation in which both 32-bit halves are nonzero,
+ // split the operation into two.
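+ // For example, (xor X, 0x100000001) becomes
+ // (xor (xor X, 0x100000000), 0x1).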
+ if (!ResNode && Node->getValueType(0) == MVT::i64)
+ if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
+ uint64_t Val = Op1->getZExtValue();
+ if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
+ Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
+ Val - uint32_t(Val), uint32_t(Val));
+ }
+ break;
+
+ case ISD::AND:
+ if (Node->getOperand(1).getOpcode() != ISD::Constant)
+ ResNode = tryRxSBG(Node, SystemZ::RNSBG);
+ // Fall through.
+ case ISD::ROTL:
+ case ISD::SHL:
+ case ISD::SRL:
+ if (!ResNode)
+ ResNode = tryRISBGZero(Node);
+ break;
+
+ case ISD::Constant:
+ // If this is a 64-bit constant that is out of the range of LLILF,
+ // LLIHF and LGFI, split it into two 32-bit pieces.
+ if (Node->getValueType(0) == MVT::i64) {
+ uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
+ if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val))
+ Node = splitLargeImmediate(ISD::OR, Node, SDValue(),
+ Val - uint32_t(Val), uint32_t(Val));
+ }
+ break;
+
+ case ISD::ATOMIC_LOAD_SUB:
+ // Try to convert subtractions of constants to additions.
+ if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
+ uint64_t Value = -Op2->getZExtValue();
+ EVT VT = Node->getValueType(0);
+ if (VT == MVT::i32 || isInt<32>(Value)) {
+ SDValue Ops[] = { Node->getOperand(0), Node->getOperand(1),
+ CurDAG->getConstant(int32_t(Value), VT) };
+ Node = CurDAG->MorphNodeTo(Node, ISD::ATOMIC_LOAD_ADD,
+ Node->getVTList(), Ops, array_lengthof(Ops));
+ }
+ }
+ break;
+
+ case SystemZISD::SELECT_CCMASK: {
+ SDValue Op0 = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ // Prefer to put any load first, so that it can be matched as a
+ // conditional load.
+ if (Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) {
+ SDValue CCValid = Node->getOperand(2);
+ SDValue CCMask = Node->getOperand(3);
+ uint64_t ConstCCValid =
+ cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
+ uint64_t ConstCCMask =
+ cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
+ // Invert the condition.
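+ // Swapping Op0 and Op1 needs the inverse condition, and XOR-ing
+ // CCMask with CCValid complements the mask within the valid CC set.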
+ CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask,
+ CCMask.getValueType());
+ SDValue Op4 = Node->getOperand(4);
+ Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
+ }
+ break;
+ }
+ }
+
+ // Select the default instruction
+ if (!ResNode)
+ ResNode = SelectCode(Node);
+
+ DEBUG(errs() << "=> ";
+ if (ResNode == NULL || ResNode == Node)
+ Node->dump(CurDAG);
+ else
+ ResNode->dump(CurDAG);
+ errs() << "\n";
+ );
+ return ResNode;
+}
+
+bool SystemZDAGToDAGISel::
+SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
+ assert(ConstraintCode == 'm' && "Unexpected constraint code");
+ // Accept addresses with short displacements, which are compatible
+ // with Q, R, S and T. But keep the index operand for future expansion.
+ SDValue Base, Disp, Index;
+ if (!selectBDXAddr(SystemZAddressingMode::FormBD,
+ SystemZAddressingMode::Disp12Only,
+ Op, Base, Disp, Index))
+ return true;
+ OutOps.push_back(Base);
+ OutOps.push_back(Disp);
+ OutOps.push_back(Index);
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
new file mode 100644
index 000000000000..f6e18530f4a5
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -0,0 +1,3248 @@
+//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SystemZTargetLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-lower"
+
+#include "SystemZISelLowering.h"
+#include "SystemZCallingConv.h"
+#include "SystemZConstantPoolValue.h"
+#include "SystemZMachineFunctionInfo.h"
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+#include <cctype>
+
+using namespace llvm;
+
+namespace {
+// Represents a sequence for extracting a 0/1 value from an IPM result:
+// (((X ^ XORValue) + AddValue) >> Bit)
+struct IPMConversion {
+ IPMConversion(int64_t xorValue, int64_t addValue, unsigned bit)
+ : XORValue(xorValue), AddValue(addValue), Bit(bit) {}
+
+ int64_t XORValue;
+ int64_t AddValue;
+ unsigned Bit;
+};
+}
+
+// Classify VT as either 32 or 64 bit.
+static bool is32Bit(EVT VT) {
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::i32:
+ return true;
+ case MVT::i64:
+ return false;
+ default:
+ llvm_unreachable("Unsupported type");
+ }
+}
+
+// Return a version of MachineOperand that can be safely used before the
+// final use.
+static MachineOperand earlyUseOperand(MachineOperand Op) {
+ if (Op.isReg())
+ Op.setIsKill(false);
+ return Op;
+}
+
+SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
+ : TargetLowering(tm, new TargetLoweringObjectFileELF()),
+ Subtarget(*tm.getSubtargetImpl()), TM(tm) {
+ MVT PtrVT = getPointerTy();
+
+ // Set up the register classes.
+ if (Subtarget.hasHighWord())
+ addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
+ else
+ addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
+ addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
+ addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
+ addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
+ addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
+
+ // Compute derived properties from the register classes
+ computeRegisterProperties();
+
+ // Set up special registers.
+ setExceptionPointerRegister(SystemZ::R6D);
+ setExceptionSelectorRegister(SystemZ::R7D);
+ setStackPointerRegisterToSaveRestore(SystemZ::R15D);
+
+ // TODO: It may be better to default to latency-oriented scheduling, however
+ // LLVM's current latency-oriented scheduler can't handle physreg definitions
+ // such as SystemZ has with CC, so set this to the register-pressure
+ // scheduler, because it can.
+ setSchedulingPreference(Sched::RegPressure);
+
+ setBooleanContents(ZeroOrOneBooleanContent);
+ setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
+
+ // Instructions are strings of 2-byte aligned 2-byte values.
+ setMinFunctionAlignment(2);
+
+ // Handle operations that are handled in a similar way for all types.
+ for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
+ I <= MVT::LAST_FP_VALUETYPE;
+ ++I) {
+ MVT VT = MVT::SimpleValueType(I);
+ if (isTypeLegal(VT)) {
+ // Lower SET_CC into an IPM-based sequence.
+ setOperationAction(ISD::SETCC, VT, Custom);
+
+ // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
+ setOperationAction(ISD::SELECT, VT, Expand);
+
+ // Lower SELECT_CC and BR_CC into separate comparisons and branches.
+ setOperationAction(ISD::SELECT_CC, VT, Custom);
+ setOperationAction(ISD::BR_CC, VT, Custom);
+ }
+ }
+
+ // Expand jump table branches as address arithmetic followed by an
+ // indirect jump.
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+
+ // Expand BRCOND into a BR_CC (see above).
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+
+ // Handle integer types.
+ for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
+ I <= MVT::LAST_INTEGER_VALUETYPE;
+ ++I) {
+ MVT VT = MVT::SimpleValueType(I);
+ if (isTypeLegal(VT)) {
+ // Expand individual DIV and REMs into DIVREMs.
+ setOperationAction(ISD::SDIV, VT, Expand);
+ setOperationAction(ISD::UDIV, VT, Expand);
+ setOperationAction(ISD::SREM, VT, Expand);
+ setOperationAction(ISD::UREM, VT, Expand);
+ setOperationAction(ISD::SDIVREM, VT, Custom);
+ setOperationAction(ISD::UDIVREM, VT, Custom);
+
+ // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
+ // FIXME: probably much too conservative.
+ setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
+ setOperationAction(ISD::ATOMIC_STORE, VT, Expand);
+
+ // No special instructions for these.
+ setOperationAction(ISD::CTPOP, VT, Expand);
+ setOperationAction(ISD::CTTZ, VT, Expand);
+ setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
+ setOperationAction(ISD::ROTR, VT, Expand);
+
+ // Use *MUL_LOHI where possible instead of MULH*.
+ setOperationAction(ISD::MULHS, VT, Expand);
+ setOperationAction(ISD::MULHU, VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, VT, Custom);
+ setOperationAction(ISD::UMUL_LOHI, VT, Custom);
+
+ // We have instructions for signed but not unsigned FP conversion.
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ }
+ }
+
+ // Type legalization will convert 8- and 16-bit atomic operations into
+ // forms that operate on i32s (but still keeping the original memory VT).
+ // Lower them into full i32 operations.
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
+
+ // We have instructions for signed but not unsigned FP conversion.
+ // Handle unsigned 32-bit types as signed 64-bit types.
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+
+ // We have native support for a 64-bit CTLZ, via FLOGR.
+ setOperationAction(ISD::CTLZ, MVT::i32, Promote);
+ setOperationAction(ISD::CTLZ, MVT::i64, Legal);
+
+ // Give LowerOperation the chance to replace 64-bit ORs with subregs.
+ setOperationAction(ISD::OR, MVT::i64, Custom);
+
+ // FIXME: Can we support these natively?
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+
+ // We have native instructions for i8, i16 and i32 extensions, but not i1.
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ // Handle the various types of symbolic address.
+ setOperationAction(ISD::ConstantPool, PtrVT, Custom);
+ setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
+ setOperationAction(ISD::BlockAddress, PtrVT, Custom);
+ setOperationAction(ISD::JumpTable, PtrVT, Custom);
+
+ // We need to handle dynamic allocations specially because of the
+ // 160-byte area at the bottom of the stack.
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
+
+ // Use custom expanders so that we can force the function to use
+ // a frame pointer.
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
+
+ // Handle prefetches with PFD or PFDRL.
+ setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
+
+ // Handle floating-point types.
+ for (unsigned I = MVT::FIRST_FP_VALUETYPE;
+ I <= MVT::LAST_FP_VALUETYPE;
+ ++I) {
+ MVT VT = MVT::SimpleValueType(I);
+ if (isTypeLegal(VT)) {
+ // We can use FI for FRINT.
+ setOperationAction(ISD::FRINT, VT, Legal);
+
+ // We can use the extended form of FI for other rounding operations.
+ if (Subtarget.hasFPExtension()) {
+ setOperationAction(ISD::FNEARBYINT, VT, Legal);
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FROUND, VT, Legal);
+ }
+
+ // No special instructions for these.
+ setOperationAction(ISD::FSIN, VT, Expand);
+ setOperationAction(ISD::FCOS, VT, Expand);
+ setOperationAction(ISD::FREM, VT, Expand);
+ }
+ }
+
+ // We have fused multiply-addition for f32 and f64 but not f128.
+ setOperationAction(ISD::FMA, MVT::f32, Legal);
+ setOperationAction(ISD::FMA, MVT::f64, Legal);
+ setOperationAction(ISD::FMA, MVT::f128, Expand);
+
+ // Needed so that we don't try to implement f128 constant loads using
+ // a load-and-extend of a f80 constant (in cases where the constant
+ // would fit in an f80).
+ setLoadExtAction(ISD::EXTLOAD, MVT::f80, Expand);
+
+ // Floating-point truncation and stores need to be done separately.
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f64, Expand);
+
+ // We have 64-bit FPR<->GPR moves, but need special handling for
+ // 32-bit forms.
+ setOperationAction(ISD::BITCAST, MVT::i32, Custom);
+ setOperationAction(ISD::BITCAST, MVT::f32, Custom);
+
+ // VASTART and VACOPY need to deal with the SystemZ-specific varargs
+ // structure, but VAEND is a no-op.
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VACOPY, MVT::Other, Custom);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+ // We want to use MVC in preference to even a single load/store pair.
+ MaxStoresPerMemcpy = 0;
+ MaxStoresPerMemcpyOptSize = 0;
+
+ // The main memset sequence is a byte store followed by an MVC.
+ // Two STC or MV..I stores win over that, but the kind of fused stores
+ // generated by target-independent code don't win when the byte value is
+ // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
+ // than "STC;MVC". Handle the choice in target-specific code instead.
+ MaxStoresPerMemset = 0;
+ MaxStoresPerMemsetOptSize = 0;
+}
+
+EVT SystemZTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+}
+
+bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
+ VT = VT.getScalarType();
+
+ if (!VT.isSimple())
+ return false;
+
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::f32:
+ case MVT::f64:
+ return true;
+ case MVT::f128:
+ return false;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
+ // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
+ return Imm.isZero() || Imm.isNegZero();
+}
+
+bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ bool *Fast) const {
+ // Unaligned accesses should never be slower than the expanded version.
+ // We check specifically for aligned accesses in the few cases where
+ // they are required.
+ if (Fast)
+ *Fast = true;
+ return true;
+}
+
+bool SystemZTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+ // Punt on globals for now, although they can be used in limited
+ // RELATIVE LONG cases.
+ if (AM.BaseGV)
+ return false;
+
+ // Require a 20-bit signed offset.
+ if (!isInt<20>(AM.BaseOffs))
+ return false;
+
+ // Indexing is OK but no scale factor can be applied.
+ return AM.Scale == 0 || AM.Scale == 1;
+}
+
+bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
+ if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
+ return false;
+ unsigned FromBits = FromType->getPrimitiveSizeInBits();
+ unsigned ToBits = ToType->getPrimitiveSizeInBits();
+ return FromBits > ToBits;
+}
+
+bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
+ if (!FromVT.isInteger() || !ToVT.isInteger())
+ return false;
+ unsigned FromBits = FromVT.getSizeInBits();
+ unsigned ToBits = ToVT.getSizeInBits();
+ return FromBits > ToBits;
+}
+
+//===----------------------------------------------------------------------===//
+// Inline asm support
+//===----------------------------------------------------------------------===//
+
+TargetLowering::ConstraintType
+SystemZTargetLowering::getConstraintType(const std::string &Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'a': // Address register
+ case 'd': // Data register (equivalent to 'r')
+ case 'f': // Floating-point register
+ case 'h': // High-part register
+ case 'r': // General-purpose register
+ return C_RegisterClass;
+
+ case 'Q': // Memory with base and unsigned 12-bit displacement
+ case 'R': // Likewise, plus an index
+ case 'S': // Memory with base and signed 20-bit displacement
+ case 'T': // Likewise, plus an index
+ case 'm': // Equivalent to 'T'.
+ return C_Memory;
+
+ case 'I': // Unsigned 8-bit constant
+ case 'J': // Unsigned 12-bit constant
+ case 'K': // Signed 16-bit constant
+ case 'L': // Signed 20-bit displacement (on all targets we support)
+ case 'M': // 0x7fffffff
+ return C_Other;
+
+ default:
+ break;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+TargetLowering::ConstraintWeight SystemZTargetLowering::
+getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (CallOperandVal == NULL)
+ return CW_Default;
+ Type *type = CallOperandVal->getType();
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+
+ case 'a': // Address register
+ case 'd': // Data register (equivalent to 'r')
+ case 'h': // High-part register
+ case 'r': // General-purpose register
+ if (type->isIntegerTy())
+ weight = CW_Register;
+ break;
+
+ case 'f': // Floating-point register
+ if (type->isFloatingPointTy())
+ weight = CW_Register;
+ break;
+
+ case 'I': // Unsigned 8-bit constant
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (isUInt<8>(C->getZExtValue()))
+ weight = CW_Constant;
+ break;
+
+ case 'J': // Unsigned 12-bit constant
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (isUInt<12>(C->getZExtValue()))
+ weight = CW_Constant;
+ break;
+
+ case 'K': // Signed 16-bit constant
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (isInt<16>(C->getSExtValue()))
+ weight = CW_Constant;
+ break;
+
+ case 'L': // Signed 20-bit displacement (on all targets we support)
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (isInt<20>(C->getSExtValue()))
+ weight = CW_Constant;
+ break;
+
+ case 'M': // 0x7fffffff
+ if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (C->getZExtValue() == 0x7fffffff)
+ weight = CW_Constant;
+ break;
+ }
+ return weight;
+}
+
+// Parse a "{tNNN}" register constraint for which the register type "t"
+// has already been verified. RC is the register class associated with "t"
+// Map maps 0-based register numbers to LLVM register numbers.
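+// For example, "{r5}" with the GR64 map yields SystemZ::R5D.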
+static std::pair<unsigned, const TargetRegisterClass *>
+parseRegisterNumber(const std::string &Constraint,
+ const TargetRegisterClass *RC, const unsigned *Map) {
+ assert(*(Constraint.end()-1) == '}' && "Missing '}'");
+ if (isdigit(Constraint[2])) {
+ std::string Suffix(Constraint.data() + 2, Constraint.size() - 2);
+ unsigned Index = atoi(Suffix.c_str());
+ if (Index < 16 && Map[Index])
+ return std::make_pair(Map[Index], RC);
+ }
+ return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
+}
+
+std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
+getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
+ if (Constraint.size() == 1) {
+ // GCC Constraint Letters
+ switch (Constraint[0]) {
+ default: break;
+ case 'd': // Data register (equivalent to 'r')
+ case 'r': // General-purpose register
+ if (VT == MVT::i64)
+ return std::make_pair(0U, &SystemZ::GR64BitRegClass);
+ else if (VT == MVT::i128)
+ return std::make_pair(0U, &SystemZ::GR128BitRegClass);
+ return std::make_pair(0U, &SystemZ::GR32BitRegClass);
+
+ case 'a': // Address register
+ if (VT == MVT::i64)
+ return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
+ else if (VT == MVT::i128)
+ return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
+ return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
+
+ case 'h': // High-part register (an LLVM extension)
+ return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
+
+ case 'f': // Floating-point register
+ if (VT == MVT::f64)
+ return std::make_pair(0U, &SystemZ::FP64BitRegClass);
+ else if (VT == MVT::f128)
+ return std::make_pair(0U, &SystemZ::FP128BitRegClass);
+ return std::make_pair(0U, &SystemZ::FP32BitRegClass);
+ }
+ }
+ if (Constraint[0] == '{') {
+ // We need to override the default register parsing for GPRs and FPRs
+ // because the interpretation depends on VT. The internal names of
+ // the registers are also different from the external names
+ // (F0D and F0S instead of F0, etc.).
+ if (Constraint[1] == 'r') {
+ if (VT == MVT::i32)
+ return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
+ SystemZMC::GR32Regs);
+ if (VT == MVT::i128)
+ return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
+ SystemZMC::GR128Regs);
+ return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
+ SystemZMC::GR64Regs);
+ }
+ if (Constraint[1] == 'f') {
+ if (VT == MVT::f32)
+ return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
+ SystemZMC::FP32Regs);
+ if (VT == MVT::f128)
+ return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
+ SystemZMC::FP128Regs);
+ return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
+ SystemZMC::FP64Regs);
+ }
+ }
+ return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+}
+
+void SystemZTargetLowering::
+LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ // Only support length 1 constraints for now.
+ if (Constraint.length() == 1) {
+ switch (Constraint[0]) {
+ case 'I': // Unsigned 8-bit constant
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (isUInt<8>(C->getZExtValue()))
+ Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
+ Op.getValueType()));
+ return;
+
+ case 'J': // Unsigned 12-bit constant
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (isUInt<12>(C->getZExtValue()))
+ Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
+ Op.getValueType()));
+ return;
+
+ case 'K': // Signed 16-bit constant
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (isInt<16>(C->getSExtValue()))
+ Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
+ Op.getValueType()));
+ return;
+
+ case 'L': // Signed 20-bit displacement (on all targets we support)
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (isInt<20>(C->getSExtValue()))
+ Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
+ Op.getValueType()));
+ return;
+
+ case 'M': // 0x7fffffff
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (C->getZExtValue() == 0x7fffffff)
+ Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
+ Op.getValueType()));
+ return;
+ }
+ }
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+//===----------------------------------------------------------------------===//
+// Calling conventions
+//===----------------------------------------------------------------------===//
+
+#include "SystemZGenCallingConv.inc"
+
+bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
+ Type *ToType) const {
+ return isTruncateFree(FromType, ToType);
+}
+
+bool SystemZTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+ return CI->isTailCall();
+}
+
+// Value is a value that has been passed to us in the location described by VA
+// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
+// any loads onto Chain.
+static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDLoc DL,
+ CCValAssign &VA, SDValue Chain,
+ SDValue Value) {
+ // If the argument has been promoted from a smaller type, insert an
+ // assertion to capture this.
+ if (VA.getLocInfo() == CCValAssign::SExt)
+ Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
+ DAG.getValueType(VA.getValVT()));
+ else if (VA.getLocInfo() == CCValAssign::ZExt)
+ Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
+ DAG.getValueType(VA.getValVT()));
+
+ if (VA.isExtInLoc())
+ Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
+ else if (VA.getLocInfo() == CCValAssign::Indirect)
+ Value = DAG.getLoad(VA.getValVT(), DL, Chain, Value,
+ MachinePointerInfo(), false, false, false, 0);
+ else
+ assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
+ return Value;
+}
+
+// Value is a value of type VA.getValVT() that we need to copy into
+// the location described by VA. Return a copy of Value converted to
+// VA.getValVT(). The caller is responsible for handling indirect values.
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDLoc DL,
+ CCValAssign &VA, SDValue Value) {
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
+ case CCValAssign::ZExt:
+ return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
+ case CCValAssign::AExt:
+ return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
+ case CCValAssign::Full:
+ return Value;
+ default:
+ llvm_unreachable("Unhandled getLocInfo()");
+ }
+}
+
+SDValue SystemZTargetLowering::
+LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ SystemZMachineFunctionInfo *FuncInfo =
+ MF.getInfo<SystemZMachineFunctionInfo>();
+ const SystemZFrameLowering *TFL =
+ static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
+
+ unsigned NumFixedGPRs = 0;
+ unsigned NumFixedFPRs = 0;
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
+ SDValue ArgValue;
+ CCValAssign &VA = ArgLocs[I];
+ EVT LocVT = VA.getLocVT();
+ if (VA.isRegLoc()) {
+ // Arguments passed in registers
+ const TargetRegisterClass *RC;
+ switch (LocVT.getSimpleVT().SimpleTy) {
+ default:
+ // Integers smaller than i64 should be promoted to i64.
+ llvm_unreachable("Unexpected argument type");
+ case MVT::i32:
+ NumFixedGPRs += 1;
+ RC = &SystemZ::GR32BitRegClass;
+ break;
+ case MVT::i64:
+ NumFixedGPRs += 1;
+ RC = &SystemZ::GR64BitRegClass;
+ break;
+ case MVT::f32:
+ NumFixedFPRs += 1;
+ RC = &SystemZ::FP32BitRegClass;
+ break;
+ case MVT::f64:
+ NumFixedFPRs += 1;
+ RC = &SystemZ::FP64BitRegClass;
+ break;
+ }
+
+ unsigned VReg = MRI.createVirtualRegister(RC);
+ MRI.addLiveIn(VA.getLocReg(), VReg);
+ ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
+ } else {
+ assert(VA.isMemLoc() && "Argument not register or memory");
+
+ // Create the frame index object for this incoming parameter.
+ int FI = MFI->CreateFixedObject(LocVT.getSizeInBits() / 8,
+ VA.getLocMemOffset(), true);
+
+ // Create the SelectionDAG nodes corresponding to a load
+ // from this parameter. Unpromoted ints and floats are
+ // passed as right-justified 8-byte values.
+ EVT PtrVT = getPointerTy();
+ SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
+ if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
+ FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4));
+ ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, false, 0);
+ }
+
+ // Convert the value of the argument register into the value that's
+ // being passed.
+ InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
+ }
+
+ if (IsVarArg) {
+ // Save the number of non-varargs registers for later use by va_start, etc.
+ FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
+ FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
+
+ // Likewise the address (in the form of a frame index) of where the
+ // first stack vararg would be. The 1-byte size here is arbitrary.
+ int64_t StackSize = CCInfo.getNextStackOffset();
+ FuncInfo->setVarArgsFrameIndex(MFI->CreateFixedObject(1, StackSize, true));
+
+ // ...and a similar frame index for the caller-allocated save area
+ // that will be used to store the incoming registers.
+ int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
+ unsigned RegSaveIndex = MFI->CreateFixedObject(1, RegSaveOffset, true);
+ FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
+
+ // Store the FPR varargs in the reserved frame slots. (We store the
+ // GPRs as part of the prologue.)
+ if (NumFixedFPRs < SystemZ::NumArgFPRs) {
+ SDValue MemOps[SystemZ::NumArgFPRs];
+ for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
+ unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
+ int FI = MFI->CreateFixedObject(8, RegSaveOffset + Offset, true);
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+ unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
+ &SystemZ::FP64BitRegClass);
+ SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
+ MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, 0);
+ }
+ // Join the stores, which are independent of one another.
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ &MemOps[NumFixedFPRs],
+ SystemZ::NumArgFPRs - NumFixedFPRs);
+ }
+ }
+
+ return Chain;
+}
+
+static bool canUseSiblingCall(CCState ArgCCInfo,
+ SmallVectorImpl<CCValAssign> &ArgLocs) {
+ // Punt if there are any indirect or stack arguments, or if the call
+ // needs the call-saved argument register R6.
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
+ CCValAssign &VA = ArgLocs[I];
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ return false;
+ if (!VA.isRegLoc())
+ return false;
+ unsigned Reg = VA.getLocReg();
+ if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
+ return false;
+ }
+ return true;
+}
+
+SDValue
+SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc &DL = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &IsTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool IsVarArg = CLI.IsVarArg;
+ MachineFunction &MF = DAG.getMachineFunction();
+ EVT PtrVT = getPointerTy();
+
+ // Analyze the operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
+
+ // We don't support GuaranteedTailCallOpt, only automatically-detected
+ // sibling calls.
+ if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs))
+ IsTailCall = false;
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+
+ // Mark the start of the call.
+ if (!IsTailCall)
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT, true),
+ DL);
+
+ // Copy argument values to their designated locations.
+ SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+ SDValue StackPtr;
+ for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
+ CCValAssign &VA = ArgLocs[I];
+ SDValue ArgValue = OutVals[I];
+
+ if (VA.getLocInfo() == CCValAssign::Indirect) {
+ // Store the argument in a stack slot and pass its address.
+ SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
+ int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
+ MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, SpillSlot,
+ MachinePointerInfo::getFixedStack(FI),
+ false, false, 0));
+ ArgValue = SpillSlot;
+ } else
+ ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);
+
+ if (VA.isRegLoc())
+ // Queue up the argument copies and emit them at the end.
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
+ else {
+ assert(VA.isMemLoc() && "Argument not register or memory");
+
+ // Work out the address of the stack slot. Unpromoted ints and
+ // floats are passed as right-justified 8-byte values.
+ if (!StackPtr.getNode())
+ StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
+ unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
+ if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
+ Offset += 4;
+ SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
+ DAG.getIntPtrConstant(Offset));
+
+ // Emit the store.
+ MemOpChains.push_back(DAG.getStore(Chain, DL, ArgValue, Address,
+ MachinePointerInfo(),
+ false, false, 0));
+ }
+ }
+
+ // Join the stores, which are independent of one another.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
+ &MemOpChains[0], MemOpChains.size());
+
+ // Accept direct calls by converting symbolic call addresses to the
+ // associated Target* opcodes. Force %r1 to be used for indirect
+ // tail calls.
+ SDValue Glue;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
+ Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
+ } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
+ Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
+ } else if (IsTailCall) {
+ Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
+ Glue = Chain.getValue(1);
+ Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
+ }
+
+ // Build a sequence of copy-to-reg nodes, chained and glued together.
+ for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
+ Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
+ RegsToPass[I].second, Glue);
+ Glue = Chain.getValue(1);
+ }
+
+ // The first call operand is the chain and the second is the target address.
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add argument registers to the end of the list so that they are
+ // known live into the call.
+ for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
+ Ops.push_back(DAG.getRegister(RegsToPass[I].first,
+ RegsToPass[I].second.getValueType()));
+
+ // Glue the call to the argument copies, if any.
+ if (Glue.getNode())
+ Ops.push_back(Glue);
+
+ // Emit the call.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ if (IsTailCall)
+ return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
+ Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ Glue = Chain.getValue(1);
+
+ // Mark the end of the call, which is glued to the call itself.
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getConstant(NumBytes, PtrVT, true),
+ DAG.getConstant(0, PtrVT, true),
+ Glue, DL);
+ Glue = Chain.getValue(1);
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RetLocs;
+ CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
+ CCValAssign &VA = RetLocs[I];
+
+ // Copy the value out, gluing the copy to the end of the call sequence.
+ SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
+ VA.getLocVT(), Glue);
+ Chain = RetValue.getValue(1);
+ Glue = RetValue.getValue(2);
+
+ // Convert the value of the return register into the value that's
+ // being returned.
+ InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
+ }
+
+ return Chain;
+}
+
+SDValue
+SystemZTargetLowering::LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // Assign locations to each returned value.
+ SmallVector<CCValAssign, 16> RetLocs;
+ CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
+
+ // Quick exit for void returns
+ if (RetLocs.empty())
+ return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
+
+ // Copy the result values into the output registers.
+ SDValue Glue;
+ SmallVector<SDValue, 4> RetOps;
+ RetOps.push_back(Chain);
+ for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
+ CCValAssign &VA = RetLocs[I];
+ SDValue RetValue = OutVals[I];
+
+ // Make the return register live on exit.
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+ // Promote the value as required.
+ RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
+
+ // Chain and glue the copies together.
+ unsigned Reg = VA.getLocReg();
+ Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
+ Glue = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
+ }
+
+ // Update chain and glue.
+ RetOps[0] = Chain;
+ if (Glue.getNode())
+ RetOps.push_back(Glue);
+
+ return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
+ RetOps.data(), RetOps.size());
+}
+
+// CC is a comparison that will be implemented using an integer or
+// floating-point comparison. Return the condition code mask for
+// a branch on true. In the integer case, CCMASK_CMP_UO is set for
+// unsigned comparisons and clear for signed ones. In the floating-point
+// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
+static unsigned CCMaskForCondCode(ISD::CondCode CC) {
+#define CONV(X) \
+ case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
+ case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
+ case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
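+ // For example, CONV(GT) maps SETGT and SETOGT to CCMASK_CMP_GT and
+ // SETUGT to CCMASK_CMP_UO | CCMASK_CMP_GT.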
+
+ switch (CC) {
+ default:
+ llvm_unreachable("Invalid integer condition!");
+
+ CONV(EQ);
+ CONV(NE);
+ CONV(GT);
+ CONV(GE);
+ CONV(LT);
+ CONV(LE);
+
+ case ISD::SETO: return SystemZ::CCMASK_CMP_O;
+ case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
+ }
+#undef CONV
+}
+
+// Return a sequence for getting a 1 from an IPM result when CC has a
+// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
+// The handling of CC values outside CCValid doesn't matter.
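+//
+// IPM inserts CC into bits 29 and 28 of the result and zeroes bits 31
+// and 30. For example, testing for CC == 0 uses
+// IPMConversion(0, -(1 << SystemZ::IPM_CC), 31): subtracting 1 << 28
+// makes the result negative exactly when the CC field is 0, so bit 31
+// holds the wanted 0/1 value.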
+static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
+ // Deal with cases where the result can be taken directly from a bit
+ // of the IPM result.
+ if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
+ return IPMConversion(0, 0, SystemZ::IPM_CC);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
+ return IPMConversion(0, 0, SystemZ::IPM_CC + 1);
+
+ // Deal with cases where we can add a value to force the sign bit
+ // to contain the right value. Putting the bit in 31 means we can
+ // use SRL rather than RISBG(L), and also makes it easier to get a
+ // 0/-1 value, so it has priority over the other tests below.
+ //
+ // These sequences rely on the fact that the upper two bits of the
+ // IPM result are zero.
+ uint64_t TopBit = uint64_t(1) << 31;
+ if (CCMask == (CCValid & SystemZ::CCMASK_0))
+ return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
+ return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0
+ | SystemZ::CCMASK_1
+ | SystemZ::CCMASK_2)))
+ return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & SystemZ::CCMASK_3))
+ return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_1
+ | SystemZ::CCMASK_2
+ | SystemZ::CCMASK_3)))
+ return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);
+
+ // Next try inverting the value and testing a bit. 0/1 could be
+ // handled this way too, but we dealt with that case above.
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
+ return IPMConversion(-1, 0, SystemZ::IPM_CC);
+
+ // Handle cases where adding a value forces a non-sign bit to contain
+ // the right value.
+ if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
+ return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
+ return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
+
+ // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these can
+ // be handled by inverting the low CC bit and applying one of the
+ // sign-based extractions above.
+ if (CCMask == (CCValid & SystemZ::CCMASK_1))
+ return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & SystemZ::CCMASK_2))
+ return IPMConversion(1 << SystemZ::IPM_CC,
+ TopBit - (3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0
+ | SystemZ::CCMASK_1
+ | SystemZ::CCMASK_3)))
+ return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
+ if (CCMask == (CCValid & (SystemZ::CCMASK_0
+ | SystemZ::CCMASK_2
+ | SystemZ::CCMASK_3)))
+ return IPMConversion(1 << SystemZ::IPM_CC,
+ TopBit - (1 << SystemZ::IPM_CC), 31);
+
+ llvm_unreachable("Unexpected CC combination");
+}
+
+// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
+// can be converted to a comparison against zero, adjust the operands
+// as necessary.
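+// For example, a signed "X > -1" becomes "X >= 0": XOR-ing
+// CCMASK_CMP_GT with CCMASK_CMP_EQ gives CCMASK_CMP_GE.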
+static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
+ SDValue &CmpOp0, SDValue &CmpOp1,
+ unsigned &CCMask) {
+ if (IsUnsigned)
+ return;
+
+ ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
+ if (!ConstOp1)
+ return;
+
+ int64_t Value = ConstOp1->getSExtValue();
+ if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
+ (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
+ (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
+ (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
+ CCMask ^= SystemZ::CCMASK_CMP_EQ;
+ CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
+ }
+}
+
+// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
+// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
+static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
+ SDValue &CmpOp0, SDValue &CmpOp1,
+ unsigned &CCMask) {
+ // For us to make any changes, it must be a comparison between a single-use
+ // load and a constant.
+ if (!CmpOp0.hasOneUse() ||
+ CmpOp0.getOpcode() != ISD::LOAD ||
+ CmpOp1.getOpcode() != ISD::Constant)
+ return;
+
+ // We must have an 8- or 16-bit load.
+ LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
+ unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
+ if (NumBits != 8 && NumBits != 16)
+ return;
+
+ // The load must be an extending one and the constant must be within the
+ // range of the unextended value.
+ ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
+ uint64_t Value = Constant->getZExtValue();
+ uint64_t Mask = (1 << NumBits) - 1;
+ if (Load->getExtensionType() == ISD::SEXTLOAD) {
+ int64_t SignedValue = Constant->getSExtValue();
+ if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
+ return;
+ // Unsigned comparison between two sign-extended values is equivalent
+ // to unsigned comparison between two zero-extended values.
+ if (IsUnsigned)
+ Value &= Mask;
+ else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
+ CCMask == SystemZ::CCMASK_CMP_NE)
+ // Any choice of IsUnsigned is OK for equality comparisons.
+ // We could use either CHHSI or CLHHSI for 16-bit comparisons,
+ // but since we use CLHHSI for zero extensions, it seems better
+ // to be consistent and do the same here.
+ Value &= Mask, IsUnsigned = true;
+ else if (NumBits == 8) {
+ // Try to treat the comparison as unsigned, so that we can use CLI.
+ // Adjust CCMask and Value as necessary.
+ if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
+ // Test whether the high bit of the byte is set.
+ Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
+ else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
+ // Test whether the high bit of the byte is clear.
+ Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
+ else
+ // No instruction exists for this combination.
+ return;
+ }
+ } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
+ if (Value > Mask)
+ return;
+ // Signed comparison between two zero-extended values is equivalent
+ // to unsigned comparison.
+ IsUnsigned = true;
+ } else
+ return;
+
+ // Make sure that the first operand is an i32 of the right extension type.
+ ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
+ if (CmpOp0.getValueType() != MVT::i32 ||
+ Load->getExtensionType() != ExtType)
+ CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
+ Load->getChain(), Load->getBasePtr(),
+ Load->getPointerInfo(), Load->getMemoryVT(),
+ Load->isVolatile(), Load->isNonTemporal(),
+ Load->getAlignment());
+
+ // Make sure that the second operand is an i32 with the right value.
+ if (CmpOp1.getValueType() != MVT::i32 ||
+ Value != Constant->getZExtValue())
+ CmpOp1 = DAG.getConstant(Value, MVT::i32);
+}
+
+// Return true if Op is either an unextended load, or a load suitable
+// for integer register-memory comparisons of type ICmpType.
+static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
+ LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
+ if (Load) {
+ // There are no instructions to compare a register with a memory byte.
+ if (Load->getMemoryVT() == MVT::i8)
+ return false;
+ // Otherwise decide on extension type.
+ switch (Load->getExtensionType()) {
+ case ISD::NON_EXTLOAD:
+ return true;
+ case ISD::SEXTLOAD:
+ return ICmpType != SystemZICMP::UnsignedOnly;
+ case ISD::ZEXTLOAD:
+ return ICmpType != SystemZICMP::SignedOnly;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+// Return true if it is better to swap comparison operands Op0 and Op1.
+// ICmpType is the type of an integer comparison.
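+// The register-memory compare instructions take the memory operand
+// second, so e.g. a comparison of a single-use load with a register is
+// swapped to put the load second; emitCmp then flips CCMASK_CMP_LT and
+// CCMASK_CMP_GT to compensate.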
+static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
+ unsigned ICmpType) {
+ // Leave f128 comparisons alone, since they have no memory forms.
+ if (Op0.getValueType() == MVT::f128)
+ return false;
+
+ // Always keep a floating-point constant second, since comparisons with
+ // zero can use LOAD TEST and comparisons with other constants make a
+ // natural memory operand.
+ if (isa<ConstantFPSDNode>(Op1))
+ return false;
+
+ // Never swap comparisons with zero since there are many ways to optimize
+ // those later.
+ ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
+ if (COp1 && COp1->getZExtValue() == 0)
+ return false;
+
+ // Look for cases where Op0 is a single-use load and Op1 isn't.
+ // In that case we generally prefer the memory to be second.
+ if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
+ !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
+ // The only exceptions are when the second operand is a constant and
+ // we can use things like CHHSI.
+ if (!COp1)
+ return true;
+ // The unsigned memory-immediate instructions can handle 16-bit
+ // unsigned integers.
+ if (ICmpType != SystemZICMP::SignedOnly &&
+ isUInt<16>(COp1->getZExtValue()))
+ return false;
+ // The signed memory-immediate instructions can handle 16-bit
+ // signed integers.
+ if (ICmpType != SystemZICMP::UnsignedOnly &&
+ isInt<16>(COp1->getSExtValue()))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+// Return true if shift operation N has an in-range constant shift value.
+// Store it in ShiftVal if so.
+static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
+ ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!Shift)
+ return false;
+
+ uint64_t Amount = Shift->getZExtValue();
+ if (Amount >= N.getValueType().getSizeInBits())
+ return false;
+
+ ShiftVal = Amount;
+ return true;
+}
+
+// Check whether an AND with Mask is suitable for a TEST UNDER MASK
+// instruction and whether the CC value is descriptive enough to handle
+// a comparison of type Opcode between the AND result and CmpVal.
+// CCMask says which comparison result is being tested and BitSize is
+// the number of bits in the operands. If TEST UNDER MASK can be used,
+// return the corresponding CC mask, otherwise return 0.
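+// For example, "(x & 0x8000) != 0" arrives with CmpVal == 0 and
+// CCMASK_CMP_NE, which maps to CCMASK_TM_SOME_1: a TMLL with mask
+// 0x8000 followed by a branch on "some bits one".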
+static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
+ uint64_t Mask, uint64_t CmpVal,
+ unsigned ICmpType) {
+ assert(Mask != 0 && "ANDs with zero should have been removed by now");
+
+ // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
+ if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
+ !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
+ return 0;
+
+ // Work out the masks for the lowest and highest bits.
+ unsigned HighShift = 63 - countLeadingZeros(Mask);
+ uint64_t High = uint64_t(1) << HighShift;
+ uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
+
+ // Signed ordered comparisons are effectively unsigned if the sign
+ // bit is dropped.
+ bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
+
+ // Check for equality comparisons with 0, or the equivalent.
+ if (CmpVal == 0) {
+ if (CCMask == SystemZ::CCMASK_CMP_EQ)
+ return SystemZ::CCMASK_TM_ALL_0;
+ if (CCMask == SystemZ::CCMASK_CMP_NE)
+ return SystemZ::CCMASK_TM_SOME_1;
+ }
+ if (EffectivelyUnsigned && CmpVal <= Low) {
+ if (CCMask == SystemZ::CCMASK_CMP_LT)
+ return SystemZ::CCMASK_TM_ALL_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GE)
+ return SystemZ::CCMASK_TM_SOME_1;
+ }
+ if (EffectivelyUnsigned && CmpVal < Low) {
+ if (CCMask == SystemZ::CCMASK_CMP_LE)
+ return SystemZ::CCMASK_TM_ALL_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GT)
+ return SystemZ::CCMASK_TM_SOME_1;
+ }
+
+ // Check for equality comparisons with the mask, or the equivalent.
+ if (CmpVal == Mask) {
+ if (CCMask == SystemZ::CCMASK_CMP_EQ)
+ return SystemZ::CCMASK_TM_ALL_1;
+ if (CCMask == SystemZ::CCMASK_CMP_NE)
+ return SystemZ::CCMASK_TM_SOME_0;
+ }
+ if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
+ if (CCMask == SystemZ::CCMASK_CMP_GT)
+ return SystemZ::CCMASK_TM_ALL_1;
+ if (CCMask == SystemZ::CCMASK_CMP_LE)
+ return SystemZ::CCMASK_TM_SOME_0;
+ }
+ if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
+ if (CCMask == SystemZ::CCMASK_CMP_GE)
+ return SystemZ::CCMASK_TM_ALL_1;
+ if (CCMask == SystemZ::CCMASK_CMP_LT)
+ return SystemZ::CCMASK_TM_SOME_0;
+ }
+
+ // Check for ordered comparisons with the top bit.
+ if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
+ if (CCMask == SystemZ::CCMASK_CMP_LE)
+ return SystemZ::CCMASK_TM_MSB_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GT)
+ return SystemZ::CCMASK_TM_MSB_1;
+ }
+ if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
+ if (CCMask == SystemZ::CCMASK_CMP_LT)
+ return SystemZ::CCMASK_TM_MSB_0;
+ if (CCMask == SystemZ::CCMASK_CMP_GE)
+ return SystemZ::CCMASK_TM_MSB_1;
+ }
+
+ // If there are just two bits, we can do equality checks for Low and High
+ // as well.
+ if (Mask == Low + High) {
+ if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
+ return SystemZ::CCMASK_TM_MIXED_MSB_0;
+ if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
+ return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
+ if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
+ return SystemZ::CCMASK_TM_MIXED_MSB_1;
+ if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
+ return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
+ }
+
+ // Looks like we've exhausted our options.
+ return 0;
+}
+
+// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
+// implemented as a TEST UNDER MASK instruction when the condition being
+// tested is as described by CCValid and CCMask. Update the arguments
+// with the TM version if so.
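+// Shifts feeding the AND can be folded too: "((x << 4) & 0x80) != 0"
+// is tested as "(x & 0x8) != 0", with the mask and comparison value
+// shifted right by the same amount.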
+static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
+ SDValue &CmpOp0, SDValue &CmpOp1,
+ unsigned &CCValid, unsigned &CCMask,
+ unsigned &ICmpType) {
+ // Check that we have a comparison with a constant.
+ ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
+ if (!ConstCmpOp1)
+ return;
+ uint64_t CmpVal = ConstCmpOp1->getZExtValue();
+
+ // Check whether the nonconstant input is an AND with a constant mask.
+ if (CmpOp0.getOpcode() != ISD::AND)
+ return;
+ SDValue AndOp0 = CmpOp0.getOperand(0);
+ SDValue AndOp1 = CmpOp0.getOperand(1);
+ ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
+ if (!Mask)
+ return;
+ uint64_t MaskVal = Mask->getZExtValue();
+
+ // Check whether the combination of mask, comparison value and comparison
+ // type are suitable.
+ unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
+ unsigned NewCCMask, ShiftVal;
+ if (ICmpType != SystemZICMP::SignedOnly &&
+ AndOp0.getOpcode() == ISD::SHL &&
+ isSimpleShift(AndOp0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
+ CmpVal >> ShiftVal,
+ SystemZICMP::Any))) {
+ AndOp0 = AndOp0.getOperand(0);
+ AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
+ } else if (ICmpType != SystemZICMP::SignedOnly &&
+ AndOp0.getOpcode() == ISD::SRL &&
+ isSimpleShift(AndOp0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
+ MaskVal << ShiftVal,
+ CmpVal << ShiftVal,
+ SystemZICMP::UnsignedOnly))) {
+ AndOp0 = AndOp0.getOperand(0);
+ AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
+ } else {
+ NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
+ ICmpType);
+ if (!NewCCMask)
+ return;
+ }
+
+ // Go ahead and make the change.
+ Opcode = SystemZISD::TM;
+ CmpOp0 = AndOp0;
+ CmpOp1 = AndOp1;
+ ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
+ bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
+ CCValid = SystemZ::CCMASK_TM;
+ CCMask = NewCCMask;
+}
+
+// Return a target node that compares CmpOp0 with CmpOp1 and stores a
+// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible
+// 2-bit results and CCMask to the subset of those results that are
+// associated with Cond.
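+// For example, an integer ISD::SETEQ initially gives CCValid ==
+// CCMASK_ICMP and CCMask == CCMASK_CMP_EQ, before the adjustments below.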
+static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
+ SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
+ ISD::CondCode Cond, unsigned &CCValid,
+ unsigned &CCMask) {
+ bool IsUnsigned = false;
+ CCMask = CCMaskForCondCode(Cond);
+ unsigned Opcode, ICmpType = 0;
+ if (CmpOp0.getValueType().isFloatingPoint()) {
+ CCValid = SystemZ::CCMASK_FCMP;
+ Opcode = SystemZISD::FCMP;
+ } else {
+ IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
+ CCValid = SystemZ::CCMASK_ICMP;
+ CCMask &= CCValid;
+ adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
+ adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
+ Opcode = SystemZISD::ICMP;
+ // Choose the type of comparison. Equality and inequality tests can
+ // use either signed or unsigned comparisons. The choice also doesn't
+ // matter if both sign bits are known to be clear. In those cases we
+ // want to give the main isel code the freedom to choose whichever
+ // form fits best.
+ if (CCMask == SystemZ::CCMASK_CMP_EQ ||
+ CCMask == SystemZ::CCMASK_CMP_NE ||
+ (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
+ ICmpType = SystemZICMP::Any;
+ else if (IsUnsigned)
+ ICmpType = SystemZICMP::UnsignedOnly;
+ else
+ ICmpType = SystemZICMP::SignedOnly;
+ }
+
+ if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
+ std::swap(CmpOp0, CmpOp1);
+ CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
+ (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_UO));
+ }
+
+ adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
+ ICmpType);
+ if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
+ return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
+ DAG.getConstant(ICmpType, MVT::i32));
+ return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
+}
+
+// Implement a 32-bit *MUL_LOHI operation by extending both operands to
+// 64 bits. Extend is the extension type to use. Store the high part
+// in Hi and the low part in Lo.
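+// For example, a 32-bit SMUL_LOHI sign-extends both operands, performs
+// one 64-bit multiplication, and returns bits 63..32 of the product as
+// Hi and bits 31..0 as Lo.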
+static void lowerMUL_LOHI32(SelectionDAG &DAG, SDLoc DL,
+ unsigned Extend, SDValue Op0, SDValue Op1,
+ SDValue &Hi, SDValue &Lo) {
+ Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
+ Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
+ SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
+ Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, DAG.getConstant(32, MVT::i64));
+ Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
+ Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
+}
+
+// Lower a binary operation that produces two VT results, one in each
+// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
+// Extend extends Op0 to a GR128, and Opcode performs the GR128 operation
+// on the extended Op0 and (unextended) Op1. Store the even register result
+// in Even and the odd register result in Odd.
+static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
+ unsigned Extend, unsigned Opcode,
+ SDValue Op0, SDValue Op1,
+ SDValue &Even, SDValue &Odd) {
+ SDNode *In128 = DAG.getMachineNode(Extend, DL, MVT::Untyped, Op0);
+ SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped,
+ SDValue(In128, 0), Op1);
+ bool Is32Bit = is32Bit(VT);
+ Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
+ Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
+}
+
+SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CmpOp0 = Op.getOperand(0);
+ SDValue CmpOp1 = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDLoc DL(Op);
+
+ unsigned CCValid, CCMask;
+ SDValue Glue = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+
+ IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
+ SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
+
+ if (Conversion.XORValue)
+ Result = DAG.getNode(ISD::XOR, DL, MVT::i32, Result,
+ DAG.getConstant(Conversion.XORValue, MVT::i32));
+
+ if (Conversion.AddValue)
+ Result = DAG.getNode(ISD::ADD, DL, MVT::i32, Result,
+ DAG.getConstant(Conversion.AddValue, MVT::i32));
+
+ // The SHR/AND sequence should get optimized to an RISBG.
+ Result = DAG.getNode(ISD::SRL, DL, MVT::i32, Result,
+ DAG.getConstant(Conversion.Bit, MVT::i32));
+ if (Conversion.Bit != 31)
+ Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
+ DAG.getConstant(1, MVT::i32));
+ return Result;
+}
+
+SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+ SDValue CmpOp0 = Op.getOperand(2);
+ SDValue CmpOp1 = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
+ SDLoc DL(Op);
+
+ unsigned CCValid, CCMask;
+ SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+ return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
+ Chain, DAG.getConstant(CCValid, MVT::i32),
+ DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
+}
+
+SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CmpOp0 = Op.getOperand(0);
+ SDValue CmpOp1 = Op.getOperand(1);
+ SDValue TrueOp = Op.getOperand(2);
+ SDValue FalseOp = Op.getOperand(3);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+ SDLoc DL(Op);
+
+ unsigned CCValid, CCMask;
+ SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+
+ SmallVector<SDValue, 5> Ops;
+ Ops.push_back(TrueOp);
+ Ops.push_back(FalseOp);
+ Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
+ Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
+ Ops.push_back(Flags);
+
+ SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
+ return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+}
+
+SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Node);
+ const GlobalValue *GV = Node->getGlobal();
+ int64_t Offset = Node->getOffset();
+ EVT PtrVT = getPointerTy();
+ Reloc::Model RM = TM.getRelocationModel();
+ CodeModel::Model CM = TM.getCodeModel();
+
+ SDValue Result;
+ if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
+ // Assign anchors at 1<<12 byte boundaries.
+ uint64_t Anchor = Offset & ~uint64_t(0xfff);
+ Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
+ Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
+
+ // The offset can be folded into the address if it is aligned to a halfword.
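+ // For example, an offset of 0x1234 gives Anchor == 0x1000 with 0x234
+ // left over; since 0x234 is even, it is folded via PCREL_OFFSET below
+ // rather than added separately at the end.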
+ Offset -= Anchor;
+ if (Offset != 0 && (Offset & 1) == 0) {
+ SDValue Full = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
+ Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
+ Offset = 0;
+ }
+ } else {
+ Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
+ Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
+ Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
+ MachinePointerInfo::getGOT(), false, false, false, 0);
+ }
+
+ // If there was a non-zero offset that we didn't fold, create an explicit
+ // addition for it.
+ if (Offset != 0)
+ Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
+ DAG.getConstant(Offset, PtrVT));
+
+ return Result;
+}
+
+SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Node);
+ const GlobalValue *GV = Node->getGlobal();
+ EVT PtrVT = getPointerTy();
+ TLSModel::Model model = TM.getTLSModel(GV);
+
+ if (model != TLSModel::LocalExec)
+ llvm_unreachable("only local-exec TLS mode supported");
+
+ // The high part of the thread pointer is in access register 0.
+ SDValue TPHi = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
+ DAG.getConstant(0, MVT::i32));
+ TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
+
+ // The low part of the thread pointer is in access register 1.
+ SDValue TPLo = DAG.getNode(SystemZISD::EXTRACT_ACCESS, DL, MVT::i32,
+ DAG.getConstant(1, MVT::i32));
+ TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
+
+ // Merge them into a single 64-bit address.
+ SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
+ DAG.getConstant(32, PtrVT));
+ SDValue TP = DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
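+ // (In assembly terms this is roughly an "ear" of each access register
+ // combined with "sllg ...,32" and an OR; the exact sequence is left to
+ // instruction selection.)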
+
+ // Get the offset of GA from the thread pointer.
+ SystemZConstantPoolValue *CPV =
+ SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
+
+ // Force the offset into the constant pool and load it from there.
+ SDValue CPAddr = DAG.getConstantPool(CPV, PtrVT, 8);
+ SDValue Offset = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(),
+ CPAddr, MachinePointerInfo::getConstantPool(),
+ false, false, false, 0);
+
+ // Add the base and offset together.
+ return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
+}
+
+SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Node);
+ const BlockAddress *BA = Node->getBlockAddress();
+ int64_t Offset = Node->getOffset();
+ EVT PtrVT = getPointerTy();
+
+ SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
+ Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
+ return Result;
+}
+
+SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
+ SelectionDAG &DAG) const {
+ SDLoc DL(JT);
+ EVT PtrVT = getPointerTy();
+ SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
+
+ // Use LARL to load the address of the table.
+ return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
+}
+
+SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
+ SelectionDAG &DAG) const {
+ SDLoc DL(CP);
+ EVT PtrVT = getPointerTy();
+
+ SDValue Result;
+ if (CP->isMachineConstantPoolEntry())
+ Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
+ CP->getAlignment());
+ else
+ Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
+ CP->getAlignment(), CP->getOffset());
+
+ // Use LARL to load the address of the constant pool entry.
+ return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
+}
+
+SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue In = Op.getOperand(0);
+ EVT InVT = In.getValueType();
+ EVT ResVT = Op.getValueType();
+
+ if (InVT == MVT::i32 && ResVT == MVT::f32) {
+ SDValue In64;
+ if (Subtarget.hasHighWord()) {
+ SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
+ MVT::i64);
+ In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
+ MVT::i64, SDValue(U64, 0), In);
+ } else {
+ In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
+ In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
+ DAG.getConstant(32, MVT::i64));
+ }
+ SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
+ return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
+ DL, MVT::f32, Out64);
+ }
+ if (InVT == MVT::f32 && ResVT == MVT::i32) {
+ SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
+ SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
+ MVT::f64, SDValue(U64, 0), In);
+ SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
+ if (Subtarget.hasHighWord())
+ return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
+ MVT::i32, Out64);
+ SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
+ DAG.getConstant(32, MVT::i64));
+ return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
+ }
+ llvm_unreachable("Unexpected bitcast combination");
+}
+
+SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ SystemZMachineFunctionInfo *FuncInfo =
+ MF.getInfo<SystemZMachineFunctionInfo>();
+ EVT PtrVT = getPointerTy();
+
+ SDValue Chain = Op.getOperand(0);
+ SDValue Addr = Op.getOperand(1);
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ SDLoc DL(Op);
+
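+ // The s390x va_list is assumed here to be the usual four-field
+ // structure (roughly: long __gpr; long __fpr; void *__overflow_arg_area;
+ // void *__reg_save_area), hence the four 8-byte slots below.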
+ // The initial values of each field.
+ const unsigned NumFields = 4;
+ SDValue Fields[NumFields] = {
+ DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), PtrVT),
+ DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), PtrVT),
+ DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
+ DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
+ };
+
+ // Store each field into its respective slot.
+ SDValue MemOps[NumFields];
+ unsigned Offset = 0;
+ for (unsigned I = 0; I < NumFields; ++I) {
+ SDValue FieldAddr = Addr;
+ if (Offset != 0)
+ FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
+ DAG.getIntPtrConstant(Offset));
+ MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
+ MachinePointerInfo(SV, Offset),
+ false, false, 0);
+ Offset += 8;
+ }
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
+}
+
+SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue DstPtr = Op.getOperand(1);
+ SDValue SrcPtr = Op.getOperand(2);
+ const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
+ const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
+ SDLoc DL(Op);
+
+ return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32),
+ /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
+ MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
+}
+
+SDValue SystemZTargetLowering::
+lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Chain = Op.getOperand(0);
+ SDValue Size = Op.getOperand(1);
+ SDLoc DL(Op);
+
+ unsigned SPReg = getStackPointerRegisterToSaveRestore();
+
+ // Get a reference to the stack pointer.
+ SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
+
+ // Get the new stack pointer value.
+ SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, Size);
+
+ // Copy the new stack pointer back.
+ Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
+
+ // The allocated data lives above the 160 bytes allocated for the standard
+ // frame, plus any outgoing stack arguments. We don't know how much that
+ // amounts to yet, so emit a special ADJDYNALLOC placeholder.
+ SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
+ SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
+
+ SDValue Ops[2] = { Result, Chain };
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ SDValue Ops[2];
+ if (is32Bit(VT))
+ // Just do a normal 64-bit multiplication and extract the results.
+ // We define this so that it can be used for constant division.
+ lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
+ Op.getOperand(1), Ops[1], Ops[0]);
+ else {
+ // Do a full 128-bit multiplication based on UMUL_LOHI64:
+ //
+ // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
+ //
+ // but using the fact that the upper halves are either all zeros
+ // or all ones:
+ //
+ // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
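+ //
+ // (If lh is all ones then lh * rl == -rl and lh & rl == rl, so the
+ // two forms agree; if lh is zero both terms vanish.  The same holds
+ // for rh.)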
+ //
+ // and grouping the terms on the right together, since the ANDs and the
+ // addition are quicker than multiplications:
+ //
+ // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
+ SDValue C63 = DAG.getConstant(63, MVT::i64);
+ SDValue LL = Op.getOperand(0);
+ SDValue RL = Op.getOperand(1);
+ SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
+ SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
+ // UMUL_LOHI64 returns the low result in the odd register and the high
+ // result in the even register. SMUL_LOHI is defined to return the
+ // low half first, so the results are in reverse order.
+ lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
+ LL, RL, Ops[1], Ops[0]);
+ SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
+ SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
+ SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
+ Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
+ }
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ SDValue Ops[2];
+ if (is32Bit(VT))
+ // Just do a normal 64-bit multiplication and extract the results.
+ // We define this so that it can be used for constant division.
+ lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
+ Op.getOperand(1), Ops[1], Ops[0]);
+ else
+ // UMUL_LOHI64 returns the low result in the odd register and the high
+ // result in the even register. UMUL_LOHI is defined to return the
+ // low half first, so the results are in reverse order.
+ lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
+ Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ unsigned Opcode;
+
+ // We use DSGF for 32-bit division.
+ if (is32Bit(VT)) {
+ Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
+ Opcode = SystemZISD::SDIVREM32;
+ } else if (DAG.ComputeNumSignBits(Op1) > 32) {
+ Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
+ Opcode = SystemZISD::SDIVREM32;
+ } else
+ Opcode = SystemZISD::SDIVREM64;
+
+ // DSG(F) takes a 64-bit dividend, so the even register in the GR128
+ // input is "don't care". The instruction returns the remainder in
+ // the even register and the quotient in the odd register.
+ SDValue Ops[2];
+ lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
+ Op0, Op1, Ops[1], Ops[0]);
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
+ SelectionDAG &DAG) const {
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+
+ // DL(G) uses a double-width dividend, so we need to clear the even
+ // register in the GR128 input. The instruction returns the remainder
+ // in the even register and the quotient in the odd register.
+ SDValue Ops[2];
+ if (is32Bit(VT))
+ lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_32, SystemZISD::UDIVREM32,
+ Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
+ else
+ lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
+ Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
+ return DAG.getMergeValues(Ops, 2, DL);
+}
+
+SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
+ assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
+
+ // Get the known-zero masks for each operand.
+ SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
+ APInt KnownZero[2], KnownOne[2];
+ DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
+ DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
+
+ // See if the upper 32 bits of one operand and the lower 32 bits of the
+ // other are known zero. They are the low and high operands respectively.
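+ // For example, in (zext i32 %a) | (%b & 0xffffffff00000000), the zext
+ // is the low operand and the masked %b the high operand, so the OR
+ // reduces to inserting %a into the low 32 bits of %b.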
+ uint64_t Masks[] = { KnownZero[0].getZExtValue(),
+ KnownZero[1].getZExtValue() };
+ unsigned High, Low;
+ if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
+ High = 1, Low = 0;
+ else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
+ High = 0, Low = 1;
+ else
+ return Op;
+
+ SDValue LowOp = Ops[Low];
+ SDValue HighOp = Ops[High];
+
+ // If the high part is a constant, we're better off using IILH.
+ if (HighOp.getOpcode() == ISD::Constant)
+ return Op;
+
+ // If the low part is a constant that is outside the range of LHI,
+ // then we're better off using IILF.
+ if (LowOp.getOpcode() == ISD::Constant) {
+ int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
+ if (!isInt<16>(Value))
+ return Op;
+ }
+
+ // Check whether the high part is an AND that doesn't change the
+ // high 32 bits and just masks out low bits. We can skip it if so.
+ if (HighOp.getOpcode() == ISD::AND &&
+ HighOp.getOperand(1).getOpcode() == ISD::Constant) {
+ SDValue HighOp0 = HighOp.getOperand(0);
+ uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
+ if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
+ HighOp = HighOp0;
+ }
+
+ // Take advantage of the fact that all GR32 operations only change the
+ // low 32 bits by truncating Low to an i32 and inserting it directly
+ // using a subreg. The interesting cases are those where the truncation
+ // can be folded.
+ SDLoc DL(Op);
+ SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
+ return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
+ MVT::i64, HighOp, Low32);
+}
+
+// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
+// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
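+// For an i8 operation, for example, this becomes a compare-and-swap
+// loop on the containing aligned word: the byte is rotated into the
+// high bits, operated on there, rotated back, and the loop retries
+// until the CS succeeds (see emitAtomicLoadBinary).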
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
+ SelectionDAG &DAG,
+ unsigned Opcode) const {
+ AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+
+ // 32-bit operations need no code outside the main loop.
+ EVT NarrowVT = Node->getMemoryVT();
+ EVT WideVT = MVT::i32;
+ if (NarrowVT == WideVT)
+ return Op;
+
+ int64_t BitSize = NarrowVT.getSizeInBits();
+ SDValue ChainIn = Node->getChain();
+ SDValue Addr = Node->getBasePtr();
+ SDValue Src2 = Node->getVal();
+ MachineMemOperand *MMO = Node->getMemOperand();
+ SDLoc DL(Node);
+ EVT PtrVT = Addr.getValueType();
+
+ // Convert atomic subtracts of constants into additions.
+ if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
+ if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
+ Opcode = SystemZISD::ATOMIC_LOADW_ADD;
+ Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
+ }
+
+ // Get the address of the containing word.
+ SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
+ DAG.getConstant(-4, PtrVT));
+
+ // Get the number of bits that the word must be rotated left in order
+ // to bring the field to the top bits of a GR32.
+ SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
+ DAG.getConstant(3, PtrVT));
+ BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
+
+ // Get the complementing shift amount, for rotating a field in the top
+ // bits back to its proper position.
+ SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
+ DAG.getConstant(0, WideVT), BitShift);
+
+ // Extend the source operand to 32 bits and prepare it for the inner loop.
+ // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
+ // operations require the source to be shifted in advance. (This shift
+ // can be folded if the source is constant.) For AND and NAND, the lower
+ // bits must be set, while for other opcodes they should be left clear.
+ if (Opcode != SystemZISD::ATOMIC_SWAPW)
+ Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
+ DAG.getConstant(32 - BitSize, WideVT));
+ if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
+ Opcode == SystemZISD::ATOMIC_LOADW_NAND)
+ Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
+ DAG.getConstant(uint32_t(-1) >> BitSize, WideVT));
+
+ // Construct the ATOMIC_LOADW_* node.
+ SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
+ SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
+ DAG.getConstant(BitSize, WideVT) };
+ SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
+ array_lengthof(Ops),
+ NarrowVT, MMO);
+
+ // Rotate the result of the final CS so that the field is in the lower
+ // bits of a GR32, then truncate it.
+ SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
+ DAG.getConstant(BitSize, WideVT));
+ SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
+
+ SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
+ return DAG.getMergeValues(RetOps, 2, DL);
+}
+
+// Op is an 8-, 16-bit or 32-bit ATOMIC_CMP_SWAP operation. Lower the first
+// two into a fullword ATOMIC_CMP_SWAPW operation.
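+// As with the atomic loads above, the narrow field is rotated into the
+// high bits of the containing word so that full-word CS instructions can
+// be used; emitAtomicCmpSwapW below expands the node into the retry loop.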
+SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
+ SelectionDAG &DAG) const {
+ AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+
+ // We have native support for 32-bit compare and swap.
+ EVT NarrowVT = Node->getMemoryVT();
+ EVT WideVT = MVT::i32;
+ if (NarrowVT == WideVT)
+ return Op;
+
+ int64_t BitSize = NarrowVT.getSizeInBits();
+ SDValue ChainIn = Node->getOperand(0);
+ SDValue Addr = Node->getOperand(1);
+ SDValue CmpVal = Node->getOperand(2);
+ SDValue SwapVal = Node->getOperand(3);
+ MachineMemOperand *MMO = Node->getMemOperand();
+ SDLoc DL(Node);
+ EVT PtrVT = Addr.getValueType();
+
+ // Get the address of the containing word.
+ SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
+ DAG.getConstant(-4, PtrVT));
+
+ // Get the number of bits that the word must be rotated left in order
+ // to bring the field to the top bits of a GR32.
+ SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
+ DAG.getConstant(3, PtrVT));
+ BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
+
+ // Get the complementing shift amount, for rotating a field in the top
+ // bits back to its proper position.
+ SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
+ DAG.getConstant(0, WideVT), BitShift);
+
+ // Construct the ATOMIC_CMP_SWAPW node.
+ SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
+ SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
+ NegBitShift, DAG.getConstant(BitSize, WideVT) };
+ SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
+ VTList, Ops, array_lengthof(Ops),
+ NarrowVT, MMO);
+ return AtomicOp;
+}
+
+SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
+ return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
+ SystemZ::R15D, Op.getValueType());
+}
+
+SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
+ SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
+ return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op),
+ SystemZ::R15D, Op.getOperand(1));
+}
+
+SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
+ SelectionDAG &DAG) const {
+ bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
+ if (!IsData)
+ // Just preserve the chain.
+ return Op.getOperand(0);
+
+ bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
+ MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
+ SDValue Ops[] = {
+ Op.getOperand(0),
+ DAG.getConstant(Code, MVT::i32),
+ Op.getOperand(1)
+ };
+ return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
+ Node->getVTList(), Ops, array_lengthof(Ops),
+ Node->getMemoryVT(), Node->getMemOperand());
+}
+
+SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::BR_CC:
+ return lowerBR_CC(Op, DAG);
+ case ISD::SELECT_CC:
+ return lowerSELECT_CC(Op, DAG);
+ case ISD::SETCC:
+ return lowerSETCC(Op, DAG);
+ case ISD::GlobalAddress:
+ return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
+ case ISD::GlobalTLSAddress:
+ return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
+ case ISD::BlockAddress:
+ return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
+ case ISD::JumpTable:
+ return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
+ case ISD::ConstantPool:
+ return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
+ case ISD::BITCAST:
+ return lowerBITCAST(Op, DAG);
+ case ISD::VASTART:
+ return lowerVASTART(Op, DAG);
+ case ISD::VACOPY:
+ return lowerVACOPY(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC:
+ return lowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::SMUL_LOHI:
+ return lowerSMUL_LOHI(Op, DAG);
+ case ISD::UMUL_LOHI:
+ return lowerUMUL_LOHI(Op, DAG);
+ case ISD::SDIVREM:
+ return lowerSDIVREM(Op, DAG);
+ case ISD::UDIVREM:
+ return lowerUDIVREM(Op, DAG);
+ case ISD::OR:
+ return lowerOR(Op, DAG);
+ case ISD::ATOMIC_SWAP:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+ case ISD::ATOMIC_LOAD_ADD:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
+ case ISD::ATOMIC_LOAD_SUB:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
+ case ISD::ATOMIC_LOAD_AND:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
+ case ISD::ATOMIC_LOAD_OR:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
+ case ISD::ATOMIC_LOAD_XOR:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
+ case ISD::ATOMIC_LOAD_NAND:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
+ case ISD::ATOMIC_LOAD_MIN:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
+ case ISD::ATOMIC_LOAD_MAX:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
+ case ISD::ATOMIC_LOAD_UMIN:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
+ case ISD::ATOMIC_LOAD_UMAX:
+ return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
+ case ISD::ATOMIC_CMP_SWAP:
+ return lowerATOMIC_CMP_SWAP(Op, DAG);
+ case ISD::STACKSAVE:
+ return lowerSTACKSAVE(Op, DAG);
+ case ISD::STACKRESTORE:
+ return lowerSTACKRESTORE(Op, DAG);
+ case ISD::PREFETCH:
+ return lowerPREFETCH(Op, DAG);
+ default:
+ llvm_unreachable("Unexpected node to lower");
+ }
+}
+
+const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
+#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
+ switch (Opcode) {
+ OPCODE(RET_FLAG);
+ OPCODE(CALL);
+ OPCODE(SIBCALL);
+ OPCODE(PCREL_WRAPPER);
+ OPCODE(PCREL_OFFSET);
+ OPCODE(ICMP);
+ OPCODE(FCMP);
+ OPCODE(TM);
+ OPCODE(BR_CCMASK);
+ OPCODE(SELECT_CCMASK);
+ OPCODE(ADJDYNALLOC);
+ OPCODE(EXTRACT_ACCESS);
+ OPCODE(UMUL_LOHI64);
+ OPCODE(SDIVREM64);
+ OPCODE(UDIVREM32);
+ OPCODE(UDIVREM64);
+ OPCODE(MVC);
+ OPCODE(MVC_LOOP);
+ OPCODE(NC);
+ OPCODE(NC_LOOP);
+ OPCODE(OC);
+ OPCODE(OC_LOOP);
+ OPCODE(XC);
+ OPCODE(XC_LOOP);
+ OPCODE(CLC);
+ OPCODE(CLC_LOOP);
+ OPCODE(STRCMP);
+ OPCODE(STPCPY);
+ OPCODE(SEARCH_STRING);
+ OPCODE(IPM);
+ OPCODE(ATOMIC_SWAPW);
+ OPCODE(ATOMIC_LOADW_ADD);
+ OPCODE(ATOMIC_LOADW_SUB);
+ OPCODE(ATOMIC_LOADW_AND);
+ OPCODE(ATOMIC_LOADW_OR);
+ OPCODE(ATOMIC_LOADW_XOR);
+ OPCODE(ATOMIC_LOADW_NAND);
+ OPCODE(ATOMIC_LOADW_MIN);
+ OPCODE(ATOMIC_LOADW_MAX);
+ OPCODE(ATOMIC_LOADW_UMIN);
+ OPCODE(ATOMIC_LOADW_UMAX);
+ OPCODE(ATOMIC_CMP_SWAPW);
+ OPCODE(PREFETCH);
+ }
+ return NULL;
+#undef OPCODE
+}
+
+//===----------------------------------------------------------------------===//
+// Custom insertion
+//===----------------------------------------------------------------------===//
+
+// Create a new basic block after MBB.
+static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
+ MachineFunction &MF = *MBB->getParent();
+ MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
+ MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
+ return NewMBB;
+}
+
+// Split MBB after MI and return the new block (the one that contains
+// instructions after MI).
+static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
+ NewMBB->splice(NewMBB->begin(), MBB,
+ llvm::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
+ return NewMBB;
+}
+
+// Split MBB before MI and return the new block (the one that contains MI).
+static MachineBasicBlock *splitBlockBefore(MachineInstr *MI,
+ MachineBasicBlock *MBB) {
+ MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
+ NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
+ NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
+ return NewMBB;
+}
+
+// Force base value Base into a register before MI. Return the register.
+static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
+ const SystemZInstrInfo *TII) {
+ if (Base.isReg())
+ return Base.getReg();
+
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LA), Reg)
+ .addOperand(Base).addImm(0).addReg(0);
+ return Reg;
+}
+
+// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
+MachineBasicBlock *
+SystemZTargetLowering::emitSelect(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+
+ unsigned DestReg = MI->getOperand(0).getReg();
+ unsigned TrueReg = MI->getOperand(1).getReg();
+ unsigned FalseReg = MI->getOperand(2).getReg();
+ unsigned CCValid = MI->getOperand(3).getImm();
+ unsigned CCMask = MI->getOperand(4).getImm();
+ DebugLoc DL = MI->getDebugLoc();
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+ // BRC CCMask, JoinMBB
+ // # fallthrough to FalseMBB
+ MBB = StartMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
+ MBB->addSuccessor(JoinMBB);
+ MBB->addSuccessor(FalseMBB);
+
+ // FalseMBB:
+ // # fallthrough to JoinMBB
+ MBB = FalseMBB;
+ MBB->addSuccessor(JoinMBB);
+
+ // JoinMBB:
+ // %Result = phi [ %FalseReg, FalseMBB ], [ %TrueReg, StartMBB ]
+ // ...
+ MBB = JoinMBB;
+ BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
+ .addReg(TrueReg).addMBB(StartMBB)
+ .addReg(FalseReg).addMBB(FalseMBB);
+
+ MI->eraseFromParent();
+ return JoinMBB;
+}
+
+// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
+// StoreOpcode is the store to use and Invert says whether the store should
+// happen when the condition is false rather than true. If a STORE ON
+// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
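+// For example, a 32-bit conditional store becomes a single STOC when the
+// subtarget has load/store-on-condition (z196 and later) and there is no
+// index register; otherwise a plain ST is branched around when the
+// condition does not hold.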
+MachineBasicBlock *
+SystemZTargetLowering::emitCondStore(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned StoreOpcode, unsigned STOCOpcode,
+ bool Invert) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+
+ unsigned SrcReg = MI->getOperand(0).getReg();
+ MachineOperand Base = MI->getOperand(1);
+ int64_t Disp = MI->getOperand(2).getImm();
+ unsigned IndexReg = MI->getOperand(3).getReg();
+ unsigned CCValid = MI->getOperand(4).getImm();
+ unsigned CCMask = MI->getOperand(5).getImm();
+ DebugLoc DL = MI->getDebugLoc();
+
+ StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
+
+ // Use STOCOpcode if possible. We could use different store patterns in
+ // order to avoid matching the index register, but the performance trade-offs
+ // might be more complicated in that case.
+ if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (Invert)
+ CCMask ^= CCValid;
+ BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
+ .addReg(SrcReg).addOperand(Base).addImm(Disp)
+ .addImm(CCValid).addImm(CCMask);
+ MI->eraseFromParent();
+ return MBB;
+ }
+
+ // Get the condition needed to branch around the store.
+ if (!Invert)
+ CCMask ^= CCValid;
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *JoinMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+ // BRC CCMask, JoinMBB
+ // # fallthrough to FalseMBB
+ MBB = StartMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(CCValid).addImm(CCMask).addMBB(JoinMBB);
+ MBB->addSuccessor(JoinMBB);
+ MBB->addSuccessor(FalseMBB);
+
+ // FalseMBB:
+ // store %SrcReg, %Disp(%Index,%Base)
+ // # fallthrough to JoinMBB
+ MBB = FalseMBB;
+ BuildMI(MBB, DL, TII->get(StoreOpcode))
+ .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
+ MBB->addSuccessor(JoinMBB);
+
+ MI->eraseFromParent();
+ return JoinMBB;
+}
+
+// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_LOAD{,W}_*
+// or ATOMIC_SWAP{,W} instruction MI. BinOpcode is the instruction that
+// performs the binary operation elided by "*", or 0 for ATOMIC_SWAP{,W}.
+// BitSize is the width of the field in bits, or 0 if this is a partword
+// ATOMIC_LOADW_* or ATOMIC_SWAPW instruction, in which case the bitsize
+// is one of the operands. Invert says whether the field should be
+// inverted after performing BinOpcode (e.g. for NAND).
+MachineBasicBlock *
+SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned BinOpcode,
+ unsigned BitSize,
+ bool Invert) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool IsSubWord = (BitSize < 32);
+
+ // Extract the operands. Base can be a register or a frame index.
+ // Src2 can be a register or immediate.
+ unsigned Dest = MI->getOperand(0).getReg();
+ MachineOperand Base = earlyUseOperand(MI->getOperand(1));
+ int64_t Disp = MI->getOperand(2).getImm();
+ MachineOperand Src2 = earlyUseOperand(MI->getOperand(3));
+ unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
+ unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
+ DebugLoc DL = MI->getDebugLoc();
+ if (IsSubWord)
+ BitSize = MI->getOperand(6).getImm();
+
+ // Subword operations use 32-bit registers.
+ const TargetRegisterClass *RC = (BitSize <= 32 ?
+ &SystemZ::GR32BitRegClass :
+ &SystemZ::GR64BitRegClass);
+ unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
+ unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
+
+ // Get the right opcodes for the displacement.
+ LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
+ CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
+ assert(LOpcode && CSOpcode && "Displacement out of range");
+
+ // Create virtual registers for temporary results.
+ unsigned OrigVal = MRI.createVirtualRegister(RC);
+ unsigned OldVal = MRI.createVirtualRegister(RC);
+ unsigned NewVal = (BinOpcode || IsSubWord ?
+ MRI.createVirtualRegister(RC) : Src2.getReg());
+ unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
+ unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
+
+ // Insert a basic block for the main loop.
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+ // ...
+ // %OrigVal = L Disp(%Base)
+ // # fall through to LoopMBB
+ MBB = StartMBB;
+ BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
+ .addOperand(Base).addImm(Disp).addReg(0);
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, LoopMBB ]
+ // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
+ // %RotatedNewVal = OP %RotatedOldVal, %Src2
+ // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
+ // %Dest = CS %OldVal, %NewVal, Disp(%Base)
+ // JNE LoopMBB
+ // # fall through to DoneMBB
+ MBB = LoopMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
+ .addReg(OrigVal).addMBB(StartMBB)
+ .addReg(Dest).addMBB(LoopMBB);
+ if (IsSubWord)
+ BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
+ .addReg(OldVal).addReg(BitShift).addImm(0);
+ if (Invert) {
+ // Perform the operation normally and then invert every bit of the field.
+ unsigned Tmp = MRI.createVirtualRegister(RC);
+ BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
+ .addReg(RotatedOldVal).addOperand(Src2);
+ if (BitSize < 32)
+ // XILF with the upper BitSize bits set.
+ BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
+ .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
+ else if (BitSize == 32)
+ // XILF with every bit set.
+ BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
+ .addReg(Tmp).addImm(~uint32_t(0));
+ else {
+ // Use LCGR and add -1 to the result, which is more compact than
+ // an XILF, XILH pair.
+ unsigned Tmp2 = MRI.createVirtualRegister(RC);
+ BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
+ BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
+ .addReg(Tmp2).addImm(-1);
+ }
+ } else if (BinOpcode)
+ // A simple binary operation.
+ BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
+ .addReg(RotatedOldVal).addOperand(Src2);
+ else if (IsSubWord)
+ // Use RISBG to rotate Src2 into position and use it to replace the
+ // field in RotatedOldVal.
+ BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
+ .addReg(RotatedOldVal).addReg(Src2.getReg())
+ .addImm(32).addImm(31 + BitSize).addImm(32 - BitSize);
+ if (IsSubWord)
+ BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
+ .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
+ BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
+ .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ MI->eraseFromParent();
+ return DoneMBB;
+}
+
+// Implement EmitInstrWithCustomInserter for pseudo
+// ATOMIC_LOAD{,W}_{,U}{MIN,MAX} instruction MI. CompareOpcode is the
+// instruction that should be used to compare the current field with the
+// minimum or maximum value. KeepOldMask is the BRC condition-code mask
+// for when the current field should be kept. BitSize is the width of
+// the field in bits, or 0 if this is a partword ATOMIC_LOADW_* instruction.
+MachineBasicBlock *
+SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned CompareOpcode,
+ unsigned KeepOldMask,
+ unsigned BitSize) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ bool IsSubWord = (BitSize < 32);
+
+ // Extract the operands. Base can be a register or a frame index.
+ unsigned Dest = MI->getOperand(0).getReg();
+ MachineOperand Base = earlyUseOperand(MI->getOperand(1));
+ int64_t Disp = MI->getOperand(2).getImm();
+ unsigned Src2 = MI->getOperand(3).getReg();
+ unsigned BitShift = (IsSubWord ? MI->getOperand(4).getReg() : 0);
+ unsigned NegBitShift = (IsSubWord ? MI->getOperand(5).getReg() : 0);
+ DebugLoc DL = MI->getDebugLoc();
+ if (IsSubWord)
+ BitSize = MI->getOperand(6).getImm();
+
+ // Subword operations use 32-bit registers.
+ const TargetRegisterClass *RC = (BitSize <= 32 ?
+ &SystemZ::GR32BitRegClass :
+ &SystemZ::GR64BitRegClass);
+ unsigned LOpcode = BitSize <= 32 ? SystemZ::L : SystemZ::LG;
+ unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
+
+ // Get the right opcodes for the displacement.
+ LOpcode = TII->getOpcodeForOffset(LOpcode, Disp);
+ CSOpcode = TII->getOpcodeForOffset(CSOpcode, Disp);
+ assert(LOpcode && CSOpcode && "Displacement out of range");
+
+ // Create virtual registers for temporary results.
+ unsigned OrigVal = MRI.createVirtualRegister(RC);
+ unsigned OldVal = MRI.createVirtualRegister(RC);
+ unsigned NewVal = MRI.createVirtualRegister(RC);
+ unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
+ unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
+ unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
+
+ // Insert 3 basic blocks for the loop.
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+ MachineBasicBlock *UseAltMBB = emitBlockAfter(LoopMBB);
+ MachineBasicBlock *UpdateMBB = emitBlockAfter(UseAltMBB);
+
+ // StartMBB:
+ // ...
+ // %OrigVal = L Disp(%Base)
+ // # fall through to LoopMBB
+ MBB = StartMBB;
+ BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
+ .addOperand(Base).addImm(Disp).addReg(0);
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %OldVal = phi [ %OrigVal, StartMBB ], [ %Dest, UpdateMBB ]
+ // %RotatedOldVal = RLL %OldVal, 0(%BitShift)
+ // CompareOpcode %RotatedOldVal, %Src2
+ // BRC KeepOldMask, UpdateMBB
+ MBB = LoopMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
+ .addReg(OrigVal).addMBB(StartMBB)
+ .addReg(Dest).addMBB(UpdateMBB);
+ if (IsSubWord)
+ BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
+ .addReg(OldVal).addReg(BitShift).addImm(0);
+ BuildMI(MBB, DL, TII->get(CompareOpcode))
+ .addReg(RotatedOldVal).addReg(Src2);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(KeepOldMask).addMBB(UpdateMBB);
+ MBB->addSuccessor(UpdateMBB);
+ MBB->addSuccessor(UseAltMBB);
+
+ // UseAltMBB:
+ // %RotatedAltVal = RISBG %RotatedOldVal, %Src2, 32, 31 + BitSize, 0
+ // # fall through to UpdateMBB
+ MBB = UseAltMBB;
+ if (IsSubWord)
+ BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
+ .addReg(RotatedOldVal).addReg(Src2)
+ .addImm(32).addImm(31 + BitSize).addImm(0);
+ MBB->addSuccessor(UpdateMBB);
+
+ // UpdateMBB:
+ // %RotatedNewVal = PHI [ %RotatedOldVal, LoopMBB ],
+ // [ %RotatedAltVal, UseAltMBB ]
+ // %NewVal = RLL %RotatedNewVal, 0(%NegBitShift)
+ // %Dest = CS %OldVal, %NewVal, Disp(%Base)
+ // JNE LoopMBB
+ // # fall through to DoneMBB
+ MBB = UpdateMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
+ .addReg(RotatedOldVal).addMBB(LoopMBB)
+ .addReg(RotatedAltVal).addMBB(UseAltMBB);
+ if (IsSubWord)
+ BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
+ .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
+ BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
+ .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ MI->eraseFromParent();
+ return DoneMBB;
+}
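+
+// Illustrative sketch, not part of this patch: for a full-word
+// ATOMIC_LOAD_UMIN_32, the loop built above computes the equivalent of the
+// following (assuming <atomic> and <cstdint>), with compare_exchange_strong
+// standing in for CS. The subword forms additionally rotate the field into
+// the high bits before comparing and back again before storing.
+static inline uint32_t atomicLoadUMin32Sketch(std::atomic<uint32_t> &Mem,
+ uint32_t Src2) {
+ uint32_t OldVal = Mem.load(); // L: initial fetch
+ for (;;) {
+ // BRC KeepOldMask branches straight to UpdateMBB when the old value
+ // already satisfies the comparison; UseAltMBB substitutes Src2.
+ uint32_t NewVal = (OldVal <= Src2 ? OldVal : Src2);
+ if (Mem.compare_exchange_strong(OldVal, NewVal)) // CS; retry on failure
+ return OldVal; // the value seen before the swap
+ }
+}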
+
+// Implement EmitInstrWithCustomInserter for pseudo ATOMIC_CMP_SWAPW
+// instruction MI.
+MachineBasicBlock *
+SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ // Extract the operands. Base can be a register or a frame index.
+ unsigned Dest = MI->getOperand(0).getReg();
+ MachineOperand Base = earlyUseOperand(MI->getOperand(1));
+ int64_t Disp = MI->getOperand(2).getImm();
+ unsigned OrigCmpVal = MI->getOperand(3).getReg();
+ unsigned OrigSwapVal = MI->getOperand(4).getReg();
+ unsigned BitShift = MI->getOperand(5).getReg();
+ unsigned NegBitShift = MI->getOperand(6).getReg();
+ int64_t BitSize = MI->getOperand(7).getImm();
+ DebugLoc DL = MI->getDebugLoc();
+
+ const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
+
+ // Get the right opcodes for the displacement.
+ unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
+ unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
+ assert(LOpcode && CSOpcode && "Displacement out of range");
+
+ // Create virtual registers for temporary results.
+ unsigned OrigOldVal = MRI.createVirtualRegister(RC);
+ unsigned OldVal = MRI.createVirtualRegister(RC);
+ unsigned CmpVal = MRI.createVirtualRegister(RC);
+ unsigned SwapVal = MRI.createVirtualRegister(RC);
+ unsigned StoreVal = MRI.createVirtualRegister(RC);
+ unsigned RetryOldVal = MRI.createVirtualRegister(RC);
+ unsigned RetryCmpVal = MRI.createVirtualRegister(RC);
+ unsigned RetrySwapVal = MRI.createVirtualRegister(RC);
+
+ // Insert 2 basic blocks for the loop.
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+ MachineBasicBlock *SetMBB = emitBlockAfter(LoopMBB);
+
+ // StartMBB:
+ // ...
+ // %OrigOldVal = L Disp(%Base)
+ // # fall through to LoopMBB
+ MBB = StartMBB;
+ BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
+ .addOperand(Base).addImm(Disp).addReg(0);
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %OldVal = phi [ %OrigOldVal, StartMBB ], [ %RetryOldVal, SetMBB ]
+ // %CmpVal = phi [ %OrigCmpVal, StartMBB ], [ %RetryCmpVal, SetMBB ]
+ // %SwapVal = phi [ %OrigSwapVal, StartMBB ], [ %RetrySwapVal, SetMBB ]
+ // %Dest = RLL %OldVal, BitSize(%BitShift)
+ // ^^ The low BitSize bits contain the field
+ // of interest.
+ // %RetryCmpVal = RISBG32 %CmpVal, %Dest, 32, 63-BitSize, 0
+ // ^^ Replace the upper 32-BitSize bits of the
+ // comparison value with those that we loaded,
+ // so that we can use a full word comparison.
+ // CR %Dest, %RetryCmpVal
+ // JNE DoneMBB
+ // # Fall through to SetMBB
+ MBB = LoopMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
+ .addReg(OrigOldVal).addMBB(StartMBB)
+ .addReg(RetryOldVal).addMBB(SetMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
+ .addReg(OrigCmpVal).addMBB(StartMBB)
+ .addReg(RetryCmpVal).addMBB(SetMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
+ .addReg(OrigSwapVal).addMBB(StartMBB)
+ .addReg(RetrySwapVal).addMBB(SetMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
+ .addReg(OldVal).addReg(BitShift).addImm(BitSize);
+ BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
+ .addReg(CmpVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
+ BuildMI(MBB, DL, TII->get(SystemZ::CR))
+ .addReg(Dest).addReg(RetryCmpVal);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addImm(SystemZ::CCMASK_CMP_NE).addMBB(DoneMBB);
+ MBB->addSuccessor(DoneMBB);
+ MBB->addSuccessor(SetMBB);
+
+ // SetMBB:
+ // %RetrySwapVal = RISBG32 %SwapVal, %Dest, 32, 63-BitSize, 0
+ // ^^ Replace the upper 32-BitSize bits of the new
+ // value with those that we loaded.
+ // %StoreVal = RLL %RetrySwapVal, -BitSize(%NegBitShift)
+ // ^^ Rotate the new field to its proper position.
+ // %RetryOldVal = CS %Dest, %StoreVal, Disp(%Base)
+ // JNE LoopMBB
+ // # fall through to DoneMBB
+ MBB = SetMBB;
+ BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
+ .addReg(SwapVal).addReg(Dest).addImm(32).addImm(63 - BitSize).addImm(0);
+ BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
+ .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
+ BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
+ .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ MI->eraseFromParent();
+ return DoneMBB;
+}
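+
+// Illustrative sketch, not part of this patch: the loop above implements an
+// 8- or 16-bit compare-and-swap on top of the word-sized CS. This mask-based
+// version (the real code uses rotations instead; FieldMask is a hypothetical
+// aligned mask for the subword field) shows the idea, assuming <atomic>:
+static inline uint32_t cmpSwapSubwordSketch(std::atomic<uint32_t> &Mem,
+ uint32_t FieldMask, uint32_t CmpVal, uint32_t SwapVal) {
+ uint32_t OldVal = Mem.load(); // L
+ for (;;) {
+ // RISBG32: splice the loaded bits outside the field into the compare
+ // value, so that a full-word CR can be used.
+ uint32_t RetryCmpVal = (CmpVal & FieldMask) | (OldVal & ~FieldMask);
+ if (OldVal != RetryCmpVal) // CR; JNE DoneMBB
+ return OldVal; // comparison failed
+ uint32_t RetrySwapVal = (SwapVal & FieldMask) | (OldVal & ~FieldMask);
+ if (Mem.compare_exchange_strong(OldVal, RetrySwapVal)) // CS
+ return OldVal; // swap succeeded
+ } // a failed CS updates OldVal; JNE LoopMBB retries
+}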
+
+// Emit an extension from a GR32 or GR64 to a GR128. ClearEven is true
+// if the high register of the GR128 value must be cleared, and false if
+// it's a don't-care. SubReg is subreg_l32 when extending a GR32
+// and subreg_l64 when extending a GR64.
+MachineBasicBlock *
+SystemZTargetLowering::emitExt128(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ bool ClearEven, unsigned SubReg) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ unsigned Dest = MI->getOperand(0).getReg();
+ unsigned Src = MI->getOperand(1).getReg();
+ unsigned In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
+
+ BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
+ if (ClearEven) {
+ unsigned NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
+ unsigned Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
+
+ BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
+ .addImm(0);
+ BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
+ .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
+ In128 = NewIn128;
+ }
+ BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
+ .addReg(In128).addReg(Src).addImm(SubReg);
+
+ MI->eraseFromParent();
+ return MBB;
+}
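+
+// Illustrative sketch, not part of this patch: viewing the GR128 as a
+// high/low pair of 64-bit halves, ZEXT128_32 behaves like the following;
+// AEXT128_64 is the same but leaves Hi undefined (ClearEven is false):
+struct GR128Sketch { uint64_t Hi, Lo; };
+static inline GR128Sketch zext128From32Sketch(uint32_t Src) {
+ GR128Sketch Result;
+ Result.Hi = 0; // LLILL 0, inserted at subreg_h64
+ Result.Lo = Src; // Src, inserted at subreg_l32
+ return Result;
+}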
+
+MachineBasicBlock *
+SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned Opcode) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ MachineOperand DestBase = earlyUseOperand(MI->getOperand(0));
+ uint64_t DestDisp = MI->getOperand(1).getImm();
+ MachineOperand SrcBase = earlyUseOperand(MI->getOperand(2));
+ uint64_t SrcDisp = MI->getOperand(3).getImm();
+ uint64_t Length = MI->getOperand(4).getImm();
+
+ // When generating more than one CLC, all but the last will need to
+ // branch to the end when a difference is found.
+ MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
+ splitBlockAfter(MI, MBB) : 0);
+
+ // Check for the loop form, in which operand 5 is the trip count.
+ if (MI->getNumExplicitOperands() > 5) {
+ bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
+
+ unsigned StartCountReg = MI->getOperand(5).getReg();
+ unsigned StartSrcReg = forceReg(MI, SrcBase, TII);
+ unsigned StartDestReg = (HaveSingleBase ? StartSrcReg :
+ forceReg(MI, DestBase, TII));
+
+ const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
+ unsigned ThisSrcReg = MRI.createVirtualRegister(RC);
+ unsigned ThisDestReg = (HaveSingleBase ? ThisSrcReg :
+ MRI.createVirtualRegister(RC));
+ unsigned NextSrcReg = MRI.createVirtualRegister(RC);
+ unsigned NextDestReg = (HaveSingleBase ? NextSrcReg :
+ MRI.createVirtualRegister(RC));
+
+ RC = &SystemZ::GR64BitRegClass;
+ unsigned ThisCountReg = MRI.createVirtualRegister(RC);
+ unsigned NextCountReg = MRI.createVirtualRegister(RC);
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+ MachineBasicBlock *NextMBB = (EndMBB ? emitBlockAfter(LoopMBB) : LoopMBB);
+
+ // StartMBB:
+ // # fall through to LoopMBB
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %ThisDestReg = phi [ %StartDestReg, StartMBB ],
+ // [ %NextDestReg, NextMBB ]
+ // %ThisSrcReg = phi [ %StartSrcReg, StartMBB ],
+ // [ %NextSrcReg, NextMBB ]
+ // %ThisCountReg = phi [ %StartCountReg, StartMBB ],
+ // [ %NextCountReg, NextMBB ]
+ // ( PFD 2, 768+DestDisp(%ThisDestReg) )
+ // Opcode DestDisp(256,%ThisDestReg), SrcDisp(%ThisSrcReg)
+ // ( JLH EndMBB )
+ //
+ // The prefetch is used only for MVC. The JLH is used only for CLC.
+ MBB = LoopMBB;
+
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
+ .addReg(StartDestReg).addMBB(StartMBB)
+ .addReg(NextDestReg).addMBB(NextMBB);
+ if (!HaveSingleBase)
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
+ .addReg(StartSrcReg).addMBB(StartMBB)
+ .addReg(NextSrcReg).addMBB(NextMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
+ .addReg(StartCountReg).addMBB(StartMBB)
+ .addReg(NextCountReg).addMBB(NextMBB);
+ if (Opcode == SystemZ::MVC)
+ BuildMI(MBB, DL, TII->get(SystemZ::PFD))
+ .addImm(SystemZ::PFD_WRITE)
+ .addReg(ThisDestReg).addImm(DestDisp + 768).addReg(0);
+ BuildMI(MBB, DL, TII->get(Opcode))
+ .addReg(ThisDestReg).addImm(DestDisp).addImm(256)
+ .addReg(ThisSrcReg).addImm(SrcDisp);
+ if (EndMBB) {
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
+ .addMBB(EndMBB);
+ MBB->addSuccessor(EndMBB);
+ MBB->addSuccessor(NextMBB);
+ }
+
+ // NextMBB:
+ // %NextDestReg = LA 256(%ThisDestReg)
+ // %NextSrcReg = LA 256(%ThisSrcReg)
+ // %NextCountReg = AGHI %ThisCountReg, -1
+ // CGHI %NextCountReg, 0
+ // JLH LoopMBB
+ // # fall through to DoneMBB
+ //
+ // The AGHI, CGHI and JLH should be converted to BRCTG by later passes.
+ MBB = NextMBB;
+
+ BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
+ .addReg(ThisDestReg).addImm(256).addReg(0);
+ if (!HaveSingleBase)
+ BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
+ .addReg(ThisSrcReg).addImm(256).addReg(0);
+ BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
+ .addReg(ThisCountReg).addImm(-1);
+ BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
+ .addReg(NextCountReg).addImm(0);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
+ .addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ DestBase = MachineOperand::CreateReg(NextDestReg, false);
+ SrcBase = MachineOperand::CreateReg(NextSrcReg, false);
+ Length &= 255;
+ MBB = DoneMBB;
+ }
+ // Handle any remaining bytes with straight-line code.
+ while (Length > 0) {
+ uint64_t ThisLength = std::min(Length, uint64_t(256));
+ // The previous iteration might have created out-of-range displacements.
+ // If so, fold the displacement into the base register using LAY first.
+ if (!isUInt<12>(DestDisp)) {
+ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
+ .addOperand(DestBase).addImm(DestDisp).addReg(0);
+ DestBase = MachineOperand::CreateReg(Reg, false);
+ DestDisp = 0;
+ }
+ if (!isUInt<12>(SrcDisp)) {
+ unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(SystemZ::LAY), Reg)
+ .addOperand(SrcBase).addImm(SrcDisp).addReg(0);
+ SrcBase = MachineOperand::CreateReg(Reg, false);
+ SrcDisp = 0;
+ }
+ BuildMI(*MBB, MI, DL, TII->get(Opcode))
+ .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
+ .addOperand(SrcBase).addImm(SrcDisp);
+ DestDisp += ThisLength;
+ SrcDisp += ThisLength;
+ Length -= ThisLength;
+ // If there's another CLC to go, branch to the end if a difference
+ // was found.
+ if (EndMBB && Length > 0) {
+ MachineBasicBlock *NextMBB = splitBlockBefore(MI, MBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ICMP).addImm(SystemZ::CCMASK_CMP_NE)
+ .addMBB(EndMBB);
+ MBB->addSuccessor(EndMBB);
+ MBB->addSuccessor(NextMBB);
+ MBB = NextMBB;
+ }
+ }
+ if (EndMBB) {
+ MBB->addSuccessor(EndMBB);
+ MBB = EndMBB;
+ MBB->addLiveIn(SystemZ::CC);
+ }
+
+ MI->eraseFromParent();
+ return MBB;
+}
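+
+// Illustrative sketch, not part of this patch: for MVC with a large constant
+// Length, the loop and remainder generated above correspond roughly to this
+// (assuming <cstring> and <cstdint>; the LAY handling of oversized
+// displacements and the CLC early exit are omitted):
+static inline void memMemLoopSketch(char *Dest, const char *Src,
+ uint64_t Length) {
+ for (uint64_t Count = Length / 256; Count > 0; --Count) { // LoopMBB
+ std::memcpy(Dest, Src, 256); // one maximum-length MVC
+ Dest += 256; // LA 256(%ThisDestReg)
+ Src += 256; // LA 256(%ThisSrcReg)
+ }
+ if (Length % 256) // straight-line remainder
+ std::memcpy(Dest, Src, Length % 256);
+}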
+
+// Decompose string pseudo-instruction MI into a loop that continually performs
+// Opcode until CC != 3.
+MachineBasicBlock *
+SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned Opcode) const {
+ const SystemZInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MBB->getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ DebugLoc DL = MI->getDebugLoc();
+
+ unsigned End1Reg = MI->getOperand(0).getReg();
+ unsigned Start1Reg = MI->getOperand(1).getReg();
+ unsigned Start2Reg = MI->getOperand(2).getReg();
+ unsigned CharReg = MI->getOperand(3).getReg();
+
+ const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
+ unsigned This1Reg = MRI.createVirtualRegister(RC);
+ unsigned This2Reg = MRI.createVirtualRegister(RC);
+ unsigned End2Reg = MRI.createVirtualRegister(RC);
+
+ MachineBasicBlock *StartMBB = MBB;
+ MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
+ MachineBasicBlock *LoopMBB = emitBlockAfter(StartMBB);
+
+ // StartMBB:
+ // # fall through to LoopMBB
+ MBB->addSuccessor(LoopMBB);
+
+ // LoopMBB:
+ // %This1Reg = phi [ %Start1Reg, StartMBB ], [ %End1Reg, LoopMBB ]
+ // %This2Reg = phi [ %Start2Reg, StartMBB ], [ %End2Reg, LoopMBB ]
+ // R0L = %CharReg
+ // %End1Reg, %End2Reg = CLST %This1Reg, %This2Reg -- uses R0L
+ // JO LoopMBB
+ // # fall through to DoneMBB
+ //
+ // The load of R0L can be hoisted by post-RA LICM.
+ MBB = LoopMBB;
+
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
+ .addReg(Start1Reg).addMBB(StartMBB)
+ .addReg(End1Reg).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
+ .addReg(Start2Reg).addMBB(StartMBB)
+ .addReg(End2Reg).addMBB(LoopMBB);
+ BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
+ BuildMI(MBB, DL, TII->get(Opcode))
+ .addReg(End1Reg, RegState::Define).addReg(End2Reg, RegState::Define)
+ .addReg(This1Reg).addReg(This2Reg);
+ BuildMI(MBB, DL, TII->get(SystemZ::BRC))
+ .addImm(SystemZ::CCMASK_ANY).addImm(SystemZ::CCMASK_3).addMBB(LoopMBB);
+ MBB->addSuccessor(LoopMBB);
+ MBB->addSuccessor(DoneMBB);
+
+ DoneMBB->addLiveIn(SystemZ::CC);
+
+ MI->eraseFromParent();
+ return DoneMBB;
+}
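+
+// Illustrative sketch, not part of this patch: CLST compares a
+// CPU-determined number of bytes per execution and sets CC 3 when it needs
+// to be resumed, which the JO above does. Collapsing that resumption into
+// one unbounded loop, and taking the terminator character to be 0, the
+// CLSTLoop pseudo computes a strcmp-style result:
+static inline int clstLoopSketch(const char *S1, const char *S2) {
+ for (;;) {
+ unsigned char C1 = *S1, C2 = *S2;
+ if (C1 != C2 || C1 == 0) // mismatch or terminator ends the loop
+ return C1 - C2; // sign encodes CC 1/2; zero is CC 0
+ ++S1, ++S2;
+ }
+}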
+
+MachineBasicBlock *SystemZTargetLowering::
+EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
+ switch (MI->getOpcode()) {
+ case SystemZ::Select32Mux:
+ case SystemZ::Select32:
+ case SystemZ::SelectF32:
+ case SystemZ::Select64:
+ case SystemZ::SelectF64:
+ case SystemZ::SelectF128:
+ return emitSelect(MI, MBB);
+
+ case SystemZ::CondStore8Mux:
+ return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
+ case SystemZ::CondStore8MuxInv:
+ return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
+ case SystemZ::CondStore16Mux:
+ return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
+ case SystemZ::CondStore16MuxInv:
+ return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
+ case SystemZ::CondStore8:
+ return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
+ case SystemZ::CondStore8Inv:
+ return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
+ case SystemZ::CondStore16:
+ return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
+ case SystemZ::CondStore16Inv:
+ return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
+ case SystemZ::CondStore32:
+ return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
+ case SystemZ::CondStore32Inv:
+ return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
+ case SystemZ::CondStore64:
+ return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
+ case SystemZ::CondStore64Inv:
+ return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
+ case SystemZ::CondStoreF32:
+ return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
+ case SystemZ::CondStoreF32Inv:
+ return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
+ case SystemZ::CondStoreF64:
+ return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
+ case SystemZ::CondStoreF64Inv:
+ return emitCondStore(MI, MBB, SystemZ::STD, 0, true);
+
+ case SystemZ::AEXT128_64:
+ return emitExt128(MI, MBB, false, SystemZ::subreg_l64);
+ case SystemZ::ZEXT128_32:
+ return emitExt128(MI, MBB, true, SystemZ::subreg_l32);
+ case SystemZ::ZEXT128_64:
+ return emitExt128(MI, MBB, true, SystemZ::subreg_l64);
+
+ case SystemZ::ATOMIC_SWAPW:
+ return emitAtomicLoadBinary(MI, MBB, 0, 0);
+ case SystemZ::ATOMIC_SWAP_32:
+ return emitAtomicLoadBinary(MI, MBB, 0, 32);
+ case SystemZ::ATOMIC_SWAP_64:
+ return emitAtomicLoadBinary(MI, MBB, 0, 64);
+
+ case SystemZ::ATOMIC_LOADW_AR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
+ case SystemZ::ATOMIC_LOADW_AFI:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
+ case SystemZ::ATOMIC_LOAD_AR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
+ case SystemZ::ATOMIC_LOAD_AHI:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
+ case SystemZ::ATOMIC_LOAD_AFI:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
+ case SystemZ::ATOMIC_LOAD_AGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
+ case SystemZ::ATOMIC_LOAD_AGHI:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
+ case SystemZ::ATOMIC_LOAD_AGFI:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
+
+ case SystemZ::ATOMIC_LOADW_SR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
+ case SystemZ::ATOMIC_LOAD_SR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
+ case SystemZ::ATOMIC_LOAD_SGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
+
+ case SystemZ::ATOMIC_LOADW_NR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
+ case SystemZ::ATOMIC_LOADW_NILH:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
+ case SystemZ::ATOMIC_LOAD_NR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
+ case SystemZ::ATOMIC_LOAD_NILL:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
+ case SystemZ::ATOMIC_LOAD_NILH:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
+ case SystemZ::ATOMIC_LOAD_NILF:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
+ case SystemZ::ATOMIC_LOAD_NGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
+ case SystemZ::ATOMIC_LOAD_NILL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
+ case SystemZ::ATOMIC_LOAD_NILH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
+ case SystemZ::ATOMIC_LOAD_NIHL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
+ case SystemZ::ATOMIC_LOAD_NIHH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
+ case SystemZ::ATOMIC_LOAD_NILF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
+ case SystemZ::ATOMIC_LOAD_NIHF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
+
+ case SystemZ::ATOMIC_LOADW_OR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
+ case SystemZ::ATOMIC_LOADW_OILH:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
+ case SystemZ::ATOMIC_LOAD_OR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
+ case SystemZ::ATOMIC_LOAD_OILL:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
+ case SystemZ::ATOMIC_LOAD_OILH:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
+ case SystemZ::ATOMIC_LOAD_OILF:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
+ case SystemZ::ATOMIC_LOAD_OGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
+ case SystemZ::ATOMIC_LOAD_OILL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
+ case SystemZ::ATOMIC_LOAD_OILH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
+ case SystemZ::ATOMIC_LOAD_OIHL64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
+ case SystemZ::ATOMIC_LOAD_OIHH64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
+ case SystemZ::ATOMIC_LOAD_OILF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
+ case SystemZ::ATOMIC_LOAD_OIHF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
+
+ case SystemZ::ATOMIC_LOADW_XR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
+ case SystemZ::ATOMIC_LOADW_XILF:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
+ case SystemZ::ATOMIC_LOAD_XR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
+ case SystemZ::ATOMIC_LOAD_XILF:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
+ case SystemZ::ATOMIC_LOAD_XGR:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
+ case SystemZ::ATOMIC_LOAD_XILF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
+ case SystemZ::ATOMIC_LOAD_XIHF64:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
+
+ case SystemZ::ATOMIC_LOADW_NRi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
+ case SystemZ::ATOMIC_LOADW_NILHi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
+ case SystemZ::ATOMIC_LOAD_NRi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
+ case SystemZ::ATOMIC_LOAD_NILLi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
+ case SystemZ::ATOMIC_LOAD_NILHi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
+ case SystemZ::ATOMIC_LOAD_NILFi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
+ case SystemZ::ATOMIC_LOAD_NGRi:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
+ case SystemZ::ATOMIC_LOAD_NILL64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NILH64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NIHL64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NIHH64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NILF64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
+ case SystemZ::ATOMIC_LOAD_NIHF64i:
+ return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64, true);
+
+ case SystemZ::ATOMIC_LOADW_MIN:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
+ SystemZ::CCMASK_CMP_LE, 0);
+ case SystemZ::ATOMIC_LOAD_MIN_32:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
+ SystemZ::CCMASK_CMP_LE, 32);
+ case SystemZ::ATOMIC_LOAD_MIN_64:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
+ SystemZ::CCMASK_CMP_LE, 64);
+
+ case SystemZ::ATOMIC_LOADW_MAX:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
+ SystemZ::CCMASK_CMP_GE, 0);
+ case SystemZ::ATOMIC_LOAD_MAX_32:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
+ SystemZ::CCMASK_CMP_GE, 32);
+ case SystemZ::ATOMIC_LOAD_MAX_64:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
+ SystemZ::CCMASK_CMP_GE, 64);
+
+ case SystemZ::ATOMIC_LOADW_UMIN:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
+ SystemZ::CCMASK_CMP_LE, 0);
+ case SystemZ::ATOMIC_LOAD_UMIN_32:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
+ SystemZ::CCMASK_CMP_LE, 32);
+ case SystemZ::ATOMIC_LOAD_UMIN_64:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
+ SystemZ::CCMASK_CMP_LE, 64);
+
+ case SystemZ::ATOMIC_LOADW_UMAX:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
+ SystemZ::CCMASK_CMP_GE, 0);
+ case SystemZ::ATOMIC_LOAD_UMAX_32:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
+ SystemZ::CCMASK_CMP_GE, 32);
+ case SystemZ::ATOMIC_LOAD_UMAX_64:
+ return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
+ SystemZ::CCMASK_CMP_GE, 64);
+
+ case SystemZ::ATOMIC_CMP_SWAPW:
+ return emitAtomicCmpSwapW(MI, MBB);
+ case SystemZ::MVCSequence:
+ case SystemZ::MVCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
+ case SystemZ::NCSequence:
+ case SystemZ::NCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::NC);
+ case SystemZ::OCSequence:
+ case SystemZ::OCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::OC);
+ case SystemZ::XCSequence:
+ case SystemZ::XCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::XC);
+ case SystemZ::CLCSequence:
+ case SystemZ::CLCLoop:
+ return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
+ case SystemZ::CLSTLoop:
+ return emitStringWrapper(MI, MBB, SystemZ::CLST);
+ case SystemZ::MVSTLoop:
+ return emitStringWrapper(MI, MBB, SystemZ::MVST);
+ case SystemZ::SRSTLoop:
+ return emitStringWrapper(MI, MBB, SystemZ::SRST);
+ default:
+ llvm_unreachable("Unexpected instr type to insert");
+ }
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
new file mode 100644
index 000000000000..c6dcca6982a6
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -0,0 +1,319 @@
+//===-- SystemZISelLowering.h - SystemZ DAG lowering interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that SystemZ uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_SystemZ_ISELLOWERING_H
+#define LLVM_TARGET_SystemZ_ISELLOWERING_H
+
+#include "SystemZ.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+namespace SystemZISD {
+ enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+ // Return with a flag operand. Operand 0 is the chain operand.
+ RET_FLAG,
+
+ // Calls a function. Operand 0 is the chain operand and operand 1
+ // is the target address. The arguments start at operand 2.
+ // There is an optional glue operand at the end.
+ CALL,
+ SIBCALL,
+
+ // Wraps a TargetGlobalAddress that should be loaded using PC-relative
+ // accesses (LARL). Operand 0 is the address.
+ PCREL_WRAPPER,
+
+ // Used in cases where an offset is applied to a TargetGlobalAddress.
+ // Operand 0 is the full TargetGlobalAddress and operand 1 is a
+ // PCREL_WRAPPER for an anchor point. This is used so that we can
+ // cheaply refer to either the full address or the anchor point
+ // as a register base.
+ PCREL_OFFSET,
+
+ // Integer comparisons. There are three operands: the two values
+ // to compare, and an integer of type SystemZICMP.
+ ICMP,
+
+ // Floating-point comparisons. The two operands are the values to compare.
+ FCMP,
+
+ // Test under mask. The first operand is ANDed with the second operand
+ // and the condition codes are set on the result. The third operand is
+ // a boolean that is true if the condition codes need to distinguish
+ // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
+ // register forms do but the memory forms don't).
+ TM,
+
+ // Branches if a condition is true. Operand 0 is the chain operand;
+ // operand 1 is the 4-bit condition-code mask, with bit N in
+ // big-endian order meaning "branch if CC=N"; operand 2 is the
+ // target block and operand 3 is the flag operand.
+ BR_CCMASK,
+
+ // Selects between operand 0 and operand 1. Operand 2 is the
+ // mask of condition-code values for which operand 0 should be
+ // chosen over operand 1; it has the same form as BR_CCMASK.
+ // Operand 3 is the flag operand.
+ SELECT_CCMASK,
+
+ // Evaluates to the gap between the stack pointer and the
+ // base of the dynamically-allocatable area.
+ ADJDYNALLOC,
+
+ // Extracts the value of a 32-bit access register. Operand 0 is
+ // the number of the register.
+ EXTRACT_ACCESS,
+
+ // Wrappers around the ISD opcodes of the same name. The output and
+ // first input operands are GR128s. The trailing numbers are the
+ // widths of the second operand in bits.
+ UMUL_LOHI64,
+ SDIVREM32,
+ SDIVREM64,
+ UDIVREM32,
+ UDIVREM64,
+
+ // Use a series of MVCs to copy bytes from one memory location to another.
+ // The operands are:
+ // - the target address
+ // - the source address
+ // - the constant length
+ //
+ // This isn't a memory opcode because we'd need to attach two
+ // MachineMemOperands rather than one.
+ MVC,
+
+ // Like MVC, but implemented as a loop that handles X*256 bytes
+ // followed by straight-line code to handle the rest (if any).
+ // The value of X is passed as an additional operand.
+ MVC_LOOP,
+
+ // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
+ NC,
+ NC_LOOP,
+ OC,
+ OC_LOOP,
+ XC,
+ XC_LOOP,
+
+ // Use CLC to compare two blocks of memory, with the same comments
+ // as for MVC and MVC_LOOP.
+ CLC,
+ CLC_LOOP,
+
+ // Use an MVST-based sequence to implement stpcpy().
+ STPCPY,
+
+ // Use a CLST-based sequence to implement strcmp(). The two input operands
+ // are the addresses of the strings to compare.
+ STRCMP,
+
+ // Use an SRST-based sequence to search a block of memory. The first
+ // operand is the end address, the second is the start, and the third
+ // is the character to search for. CC is set to 1 on success and 2
+ // on failure.
+ SEARCH_STRING,
+
+ // Store the CC value in bits 29 and 28 of an integer.
+ IPM,
+
+ // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
+ // ATOMIC_LOAD_<op>.
+ //
+ // Operand 0: the address of the containing 32-bit-aligned field
+ // Operand 1: the second operand of <op>, in the high bits of an i32
+ // for everything except ATOMIC_SWAPW
+ // Operand 2: how many bits to rotate the i32 left to bring the first
+ // operand into the high bits
+ // Operand 3: the negative of operand 2, for rotating the other way
+ // Operand 4: the width of the field in bits (8 or 16)
+ ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ ATOMIC_LOADW_ADD,
+ ATOMIC_LOADW_SUB,
+ ATOMIC_LOADW_AND,
+ ATOMIC_LOADW_OR,
+ ATOMIC_LOADW_XOR,
+ ATOMIC_LOADW_NAND,
+ ATOMIC_LOADW_MIN,
+ ATOMIC_LOADW_MAX,
+ ATOMIC_LOADW_UMIN,
+ ATOMIC_LOADW_UMAX,
+
+ // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
+ //
+ // Operand 0: the address of the containing 32-bit-aligned field
+ // Operand 1: the compare value, in the low bits of an i32
+ // Operand 2: the swap value, in the low bits of an i32
+ // Operand 3: how many bits to rotate the i32 left to bring the first
+ // operand into the high bits
+ // Operand 4: the negative of operand 3, for rotating the other way
+ // Operand 5: the width of the field in bits (8 or 16)
+ ATOMIC_CMP_SWAPW,
+
+ // Prefetch from the second operand using the 4-bit control code in
+ // the first operand. The code is 1 for a load prefetch and 2 for
+ // a store prefetch.
+ PREFETCH
+ };
+
+ // Return true if OPCODE is some kind of PC-relative address.
+ inline bool isPCREL(unsigned Opcode) {
+ return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
+ }
+}
+
+namespace SystemZICMP {
+ // Describes whether an integer comparison needs to be signed or unsigned,
+ // or whether either type is OK.
+ enum {
+ Any,
+ UnsignedOnly,
+ SignedOnly
+ };
+}
+
+class SystemZSubtarget;
+class SystemZTargetMachine;
+
+class SystemZTargetLowering : public TargetLowering {
+public:
+ explicit SystemZTargetLowering(SystemZTargetMachine &TM);
+
+ // Override TargetLowering.
+ virtual MVT getScalarShiftAmountTy(EVT LHSTy) const LLVM_OVERRIDE {
+ return MVT::i32;
+ }
+ virtual EVT getSetCCResultType(LLVMContext &, EVT) const LLVM_OVERRIDE;
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const LLVM_OVERRIDE;
+ virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const LLVM_OVERRIDE;
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
+ LLVM_OVERRIDE;
+ virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
+ LLVM_OVERRIDE;
+ virtual bool isTruncateFree(Type *, Type *) const LLVM_OVERRIDE;
+ virtual bool isTruncateFree(EVT, EVT) const LLVM_OVERRIDE;
+ virtual const char *getTargetNodeName(unsigned Opcode) const LLVM_OVERRIDE;
+ virtual std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const std::string &Constraint,
+ MVT VT) const LLVM_OVERRIDE;
+ virtual TargetLowering::ConstraintType
+ getConstraintType(const std::string &Constraint) const LLVM_OVERRIDE;
+ virtual TargetLowering::ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const LLVM_OVERRIDE;
+ virtual void
+ LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const LLVM_OVERRIDE;
+ virtual MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const LLVM_OVERRIDE;
+ virtual SDValue LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const LLVM_OVERRIDE;
+ virtual bool allowTruncateForTailCall(Type *, Type *) const LLVM_OVERRIDE;
+ virtual bool mayBeEmittedAsTailCall(CallInst *CI) const LLVM_OVERRIDE;
+ virtual SDValue
+ LowerFormalArguments(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
+ virtual SDValue
+ LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
+
+ virtual SDValue
+ LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE;
+
+private:
+ const SystemZSubtarget &Subtarget;
+ const SystemZTargetMachine &TM;
+
+ // Implement LowerOperation for individual opcodes.
+ SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerGlobalAddress(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG) const;
+ SDValue lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
+ SelectionDAG &DAG) const;
+ SDValue lowerBlockAddress(BlockAddressSDNode *Node,
+ SelectionDAG &DAG) const;
+ SDValue lowerJumpTable(JumpTableSDNode *JT, SelectionDAG &DAG) const;
+ SDValue lowerConstantPool(ConstantPoolSDNode *CP, SelectionDAG &DAG) const;
+ SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG,
+ unsigned Opcode) const;
+ SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
+
+ // If the last instruction before MBBI in MBB was some form of COMPARE,
+ // try to replace it with a COMPARE AND BRANCH just before MBBI.
+ // CCMask and Target are the BRC-like operands for the branch.
+ // Return true if the change was made.
+ bool convertPrevCompareToBranch(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned CCMask,
+ MachineBasicBlock *Target) const;
+
+ // Implement EmitInstrWithCustomInserter for individual operation types.
+ MachineBasicBlock *emitSelect(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitCondStore(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned StoreOpcode, unsigned STOCOpcode,
+ bool Invert) const;
+ MachineBasicBlock *emitExt128(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ bool ClearEven, unsigned SubReg) const;
+ MachineBasicBlock *emitAtomicLoadBinary(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned BinOpcode, unsigned BitSize,
+ bool Invert = false) const;
+ MachineBasicBlock *emitAtomicLoadMinMax(MachineInstr *MI,
+ MachineBasicBlock *MBB,
+ unsigned CompareOpcode,
+ unsigned KeepOldMask,
+ unsigned BitSize) const;
+ MachineBasicBlock *emitAtomicCmpSwapW(MachineInstr *MI,
+ MachineBasicBlock *BB) const;
+ MachineBasicBlock *emitMemMemWrapper(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Opcode) const;
+ MachineBasicBlock *emitStringWrapper(MachineInstr *MI,
+ MachineBasicBlock *BB,
+ unsigned Opcode) const;
+};
+} // end namespace llvm
+
+#endif // LLVM_TARGET_SystemZ_ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
new file mode 100644
index 000000000000..fb699b9ab8d7
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
@@ -0,0 +1,48 @@
+//===-- SystemZInstrBuilder.h - Functions to aid building insts -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes functions that may be used with BuildMI from the
+// MachineInstrBuilder.h file to handle SystemZ'isms in a clean way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZINSTRBUILDER_H
+#define SYSTEMZINSTRBUILDER_H
+
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+
+namespace llvm {
+
+/// Add a BDX memory reference for frame object FI to MIB.
+static inline const MachineInstrBuilder &
+addFrameReference(const MachineInstrBuilder &MIB, int FI) {
+ MachineInstr *MI = MIB;
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MachineFrameInfo *MFFrame = MF.getFrameInfo();
+ const MCInstrDesc &MCID = MI->getDesc();
+ unsigned Flags = 0;
+ if (MCID.mayLoad())
+ Flags |= MachineMemOperand::MOLoad;
+ if (MCID.mayStore())
+ Flags |= MachineMemOperand::MOStore;
+ int64_t Offset = 0;
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo(
+ PseudoSourceValue::getFixedStack(FI), Offset),
+ Flags, MFFrame->getObjectSize(FI),
+ MFFrame->getObjectAlignment(FI));
+ return MIB.addFrameIndex(FI).addImm(Offset).addReg(0).addMemOperand(MMO);
+}
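+
+// Typical use, as an illustration only (the surrounding variable names are
+// assumptions, not part of this header): folding a stack slot into a load
+// during spilling might look like
+// addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(SystemZ::L), Reg), FI);
+// which appends the frame index, a zero displacement, a zero index register
+// and a MachineMemOperand describing the slot.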
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
new file mode 100644
index 000000000000..60800460fca7
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -0,0 +1,366 @@
+//==- SystemZInstrFP.td - Floating-point SystemZ instructions --*- tblgen-*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Select instructions
+//===----------------------------------------------------------------------===//
+
+// C's ?: operator for floating-point operands.
+def SelectF32 : SelectWrapper<FP32>;
+def SelectF64 : SelectWrapper<FP64>;
+def SelectF128 : SelectWrapper<FP128>;
+
+defm CondStoreF32 : CondStores<FP32, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+defm CondStoreF64 : CondStores<FP64, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
+//===----------------------------------------------------------------------===//
+// Move instructions
+//===----------------------------------------------------------------------===//
+
+// Load zero.
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in {
+ def LZER : InherentRRE<"lzer", 0xB374, FP32, (fpimm0)>;
+ def LZDR : InherentRRE<"lzdr", 0xB375, FP64, (fpimm0)>;
+ def LZXR : InherentRRE<"lzxr", 0xB376, FP128, (fpimm0)>;
+}
+
+// Moves between two floating-point registers.
+let neverHasSideEffects = 1 in {
+ def LER : UnaryRR <"le", 0x38, null_frag, FP32, FP32>;
+ def LDR : UnaryRR <"ld", 0x28, null_frag, FP64, FP64>;
+ def LXR : UnaryRRE<"lx", 0xB365, null_frag, FP128, FP128>;
+}
+
+// Moves between two floating-point registers that also set the condition
+// codes.
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ defm LTEBR : LoadAndTestRRE<"lteb", 0xB302, FP32>;
+ defm LTDBR : LoadAndTestRRE<"ltdb", 0xB312, FP64>;
+ defm LTXBR : LoadAndTestRRE<"ltxb", 0xB342, FP128>;
+}
+def : CompareZeroFP<LTEBRCompare, FP32>;
+def : CompareZeroFP<LTDBRCompare, FP64>;
+def : CompareZeroFP<LTXBRCompare, FP128>;
+
+// Moves between 64-bit integer and floating-point registers.
+def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>;
+def LDGR : UnaryRRE<"ldg", 0xB3C1, bitconvert, FP64, GR64>;
+
+// fcopysign with an FP32 result.
+let isCodeGenOnly = 1 in {
+ def CPSDRss : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP32>;
+ def CPSDRsd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP64>;
+}
+
+// The sign of an FP128 is in the high register.
+def : Pat<(fcopysign FP32:$src1, FP128:$src2),
+ (CPSDRsd FP32:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>;
+
+// fcopysign with an FP64 result.
+let isCodeGenOnly = 1 in
+ def CPSDRds : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP32>;
+def CPSDRdd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP64>;
+
+// The sign of an FP128 is in the high register.
+def : Pat<(fcopysign FP64:$src1, FP128:$src2),
+ (CPSDRdd FP64:$src1, (EXTRACT_SUBREG FP128:$src2, subreg_h64))>;
+
+// fcopysign with an FP128 result. Use "upper" as the high half and leave
+// the low half as-is.
+class CopySign128<RegisterOperand cls, dag upper>
+ : Pat<(fcopysign FP128:$src1, cls:$src2),
+ (INSERT_SUBREG FP128:$src1, upper, subreg_h64)>;
+
+def : CopySign128<FP32, (CPSDRds (EXTRACT_SUBREG FP128:$src1, subreg_h64),
+ FP32:$src2)>;
+def : CopySign128<FP64, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64),
+ FP64:$src2)>;
+def : CopySign128<FP128, (CPSDRdd (EXTRACT_SUBREG FP128:$src1, subreg_h64),
+ (EXTRACT_SUBREG FP128:$src2, subreg_h64))>;
+
+defm LoadStoreF32 : MVCLoadStore<load, f32, MVCSequence, 4>;
+defm LoadStoreF64 : MVCLoadStore<load, f64, MVCSequence, 8>;
+defm LoadStoreF128 : MVCLoadStore<load, f128, MVCSequence, 16>;
+
+//===----------------------------------------------------------------------===//
+// Load instructions
+//===----------------------------------------------------------------------===//
+
+let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
+ defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32, 4>;
+ defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>;
+
+ // These instructions are split after register allocation, so we don't
+ // want a custom inserter.
+ let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
+ def LX : Pseudo<(outs FP128:$dst), (ins bdxaddr20only128:$src),
+ [(set FP128:$dst, (load bdxaddr20only128:$src))]>;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Store instructions
+//===----------------------------------------------------------------------===//
+
+let SimpleBDXStore = 1 in {
+ defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32, 4>;
+ defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64, 8>;
+
+ // These instructions are split after register allocation, so we don't
+ // want a custom inserter.
+ let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
+ def STX : Pseudo<(outs), (ins FP128:$src, bdxaddr20only128:$dst),
+ [(store FP128:$src, bdxaddr20only128:$dst)]>;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Conversion instructions
+//===----------------------------------------------------------------------===//
+
+// Convert floating-point values to narrower representations, rounding
+// according to the current mode. The destination of LEXBR and LDXBR
+// is a 128-bit value, but only the first register of the pair is used.
+def LEDBR : UnaryRRE<"ledb", 0xB344, fround, FP32, FP64>;
+def LEXBR : UnaryRRE<"lexb", 0xB346, null_frag, FP128, FP128>;
+def LDXBR : UnaryRRE<"ldxb", 0xB345, null_frag, FP128, FP128>;
+
+def : Pat<(f32 (fround FP128:$src)),
+ (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_hh32)>;
+def : Pat<(f64 (fround FP128:$src)),
+ (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_h64)>;
+
+// Extend register floating-point values to wider representations.
+def LDEBR : UnaryRRE<"ldeb", 0xB304, fextend, FP64, FP32>;
+def LXEBR : UnaryRRE<"lxeb", 0xB306, fextend, FP128, FP32>;
+def LXDBR : UnaryRRE<"lxdb", 0xB305, fextend, FP128, FP64>;
+
+// Extend memory floating-point values to wider representations.
+def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64, 4>;
+def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128, 4>;
+def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128, 8>;
+
+// Convert a signed integer register value to a floating-point one.
+def CEFBR : UnaryRRE<"cefb", 0xB394, sint_to_fp, FP32, GR32>;
+def CDFBR : UnaryRRE<"cdfb", 0xB395, sint_to_fp, FP64, GR32>;
+def CXFBR : UnaryRRE<"cxfb", 0xB396, sint_to_fp, FP128, GR32>;
+
+def CEGBR : UnaryRRE<"cegb", 0xB3A4, sint_to_fp, FP32, GR64>;
+def CDGBR : UnaryRRE<"cdgb", 0xB3A5, sint_to_fp, FP64, GR64>;
+def CXGBR : UnaryRRE<"cxgb", 0xB3A6, sint_to_fp, FP128, GR64>;
+
+// Convert a floating-point register value to a signed integer value,
+// with the second operand (modifier M3) specifying the rounding mode.
+let Defs = [CC] in {
+ def CFEBR : UnaryRRF<"cfeb", 0xB398, GR32, FP32>;
+ def CFDBR : UnaryRRF<"cfdb", 0xB399, GR32, FP64>;
+ def CFXBR : UnaryRRF<"cfxb", 0xB39A, GR32, FP128>;
+
+ def CGEBR : UnaryRRF<"cgeb", 0xB3A8, GR64, FP32>;
+ def CGDBR : UnaryRRF<"cgdb", 0xB3A9, GR64, FP64>;
+ def CGXBR : UnaryRRF<"cgxb", 0xB3AA, GR64, FP128>;
+}
+
+// fp_to_sint always rounds towards zero, which is modifier value 5.
+def : Pat<(i32 (fp_to_sint FP32:$src)), (CFEBR 5, FP32:$src)>;
+def : Pat<(i32 (fp_to_sint FP64:$src)), (CFDBR 5, FP64:$src)>;
+def : Pat<(i32 (fp_to_sint FP128:$src)), (CFXBR 5, FP128:$src)>;
+
+def : Pat<(i64 (fp_to_sint FP32:$src)), (CGEBR 5, FP32:$src)>;
+def : Pat<(i64 (fp_to_sint FP64:$src)), (CGDBR 5, FP64:$src)>;
+def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>;
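+
+// For example (illustrative, not part of this patch): with modifier 5,
+// (i32 (fp_to_sint (f32 -1.7))) selects to "cfebr %r1, 5, %f0" and yields
+// -1, matching C's truncating conversion regardless of the current FPC
+// rounding mode.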
+
+//===----------------------------------------------------------------------===//
+// Unary arithmetic
+//===----------------------------------------------------------------------===//
+
+// Negation (Load Complement).
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def LCEBR : UnaryRRE<"lceb", 0xB303, fneg, FP32, FP32>;
+ def LCDBR : UnaryRRE<"lcdb", 0xB313, fneg, FP64, FP64>;
+ def LCXBR : UnaryRRE<"lcxb", 0xB343, fneg, FP128, FP128>;
+}
+
+// Absolute value (Load Positive).
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def LPEBR : UnaryRRE<"lpeb", 0xB300, fabs, FP32, FP32>;
+ def LPDBR : UnaryRRE<"lpdb", 0xB310, fabs, FP64, FP64>;
+ def LPXBR : UnaryRRE<"lpxb", 0xB340, fabs, FP128, FP128>;
+}
+
+// Negative absolute value (Load Negative).
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def LNEBR : UnaryRRE<"lneb", 0xB301, fnabs, FP32, FP32>;
+ def LNDBR : UnaryRRE<"lndb", 0xB311, fnabs, FP64, FP64>;
+ def LNXBR : UnaryRRE<"lnxb", 0xB341, fnabs, FP128, FP128>;
+}
+
+// Square root.
+def SQEBR : UnaryRRE<"sqeb", 0xB314, fsqrt, FP32, FP32>;
+def SQDBR : UnaryRRE<"sqdb", 0xB315, fsqrt, FP64, FP64>;
+def SQXBR : UnaryRRE<"sqxb", 0xB316, fsqrt, FP128, FP128>;
+
+def SQEB : UnaryRXE<"sqeb", 0xED14, loadu<fsqrt>, FP32, 4>;
+def SQDB : UnaryRXE<"sqdb", 0xED15, loadu<fsqrt>, FP64, 8>;
+
+// Round to an integer, with the second operand (modifier M3) specifying
+// the rounding mode. These forms always check for inexact conditions.
+def FIEBR : UnaryRRF<"fieb", 0xB357, FP32, FP32>;
+def FIDBR : UnaryRRF<"fidb", 0xB35F, FP64, FP64>;
+def FIXBR : UnaryRRF<"fixb", 0xB347, FP128, FP128>;
+
+// Extended forms of the previous three instructions. M4 can be set to 4
+// to suppress detection of inexact conditions.
+def FIEBRA : UnaryRRF4<"fiebra", 0xB357, FP32, FP32>,
+ Requires<[FeatureFPExtension]>;
+def FIDBRA : UnaryRRF4<"fidbra", 0xB35F, FP64, FP64>,
+ Requires<[FeatureFPExtension]>;
+def FIXBRA : UnaryRRF4<"fixbra", 0xB347, FP128, FP128>,
+ Requires<[FeatureFPExtension]>;
+
+// frint rounds according to the current mode (modifier 0) and detects
+// inexact conditions.
+def : Pat<(frint FP32:$src), (FIEBR 0, FP32:$src)>;
+def : Pat<(frint FP64:$src), (FIDBR 0, FP64:$src)>;
+def : Pat<(frint FP128:$src), (FIXBR 0, FP128:$src)>;
+
+let Predicates = [FeatureFPExtension] in {
+ // fnearbyint is like frint but does not detect inexact conditions.
+ def : Pat<(fnearbyint FP32:$src), (FIEBRA 0, FP32:$src, 4)>;
+ def : Pat<(fnearbyint FP64:$src), (FIDBRA 0, FP64:$src, 4)>;
+ def : Pat<(fnearbyint FP128:$src), (FIXBRA 0, FP128:$src, 4)>;
+
+ // floor is no longer allowed to raise an inexact condition,
+ // so restrict it to the cases where the condition can be suppressed.
+ // Mode 7 is round towards -inf.
+ def : Pat<(ffloor FP32:$src), (FIEBRA 7, FP32:$src, 4)>;
+ def : Pat<(ffloor FP64:$src), (FIDBRA 7, FP64:$src, 4)>;
+ def : Pat<(ffloor FP128:$src), (FIXBRA 7, FP128:$src, 4)>;
+
+ // Same idea for ceil, where mode 6 is round towards +inf.
+ def : Pat<(fceil FP32:$src), (FIEBRA 6, FP32:$src, 4)>;
+ def : Pat<(fceil FP64:$src), (FIDBRA 6, FP64:$src, 4)>;
+ def : Pat<(fceil FP128:$src), (FIXBRA 6, FP128:$src, 4)>;
+
+ // Same idea for trunc, where mode 5 is round towards zero.
+ def : Pat<(ftrunc FP32:$src), (FIEBRA 5, FP32:$src, 4)>;
+ def : Pat<(ftrunc FP64:$src), (FIDBRA 5, FP64:$src, 4)>;
+ def : Pat<(ftrunc FP128:$src), (FIXBRA 5, FP128:$src, 4)>;
+
+ // Same idea for round, where mode 1 is round towards nearest with
+ // ties away from zero.
+ def : Pat<(frnd FP32:$src), (FIEBRA 1, FP32:$src, 4)>;
+ def : Pat<(frnd FP64:$src), (FIDBRA 1, FP64:$src, 4)>;
+ def : Pat<(frnd FP128:$src), (FIXBRA 1, FP128:$src, 4)>;
+}
+
+//===----------------------------------------------------------------------===//
+// Binary arithmetic
+//===----------------------------------------------------------------------===//
+
+// Addition.
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ let isCommutable = 1 in {
+ def AEBR : BinaryRRE<"aeb", 0xB30A, fadd, FP32, FP32>;
+ def ADBR : BinaryRRE<"adb", 0xB31A, fadd, FP64, FP64>;
+ def AXBR : BinaryRRE<"axb", 0xB34A, fadd, FP128, FP128>;
+ }
+ def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load, 4>;
+ def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load, 8>;
+}
+
+// Subtraction.
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
+ def SEBR : BinaryRRE<"seb", 0xB30B, fsub, FP32, FP32>;
+ def SDBR : BinaryRRE<"sdb", 0xB31B, fsub, FP64, FP64>;
+ def SXBR : BinaryRRE<"sxb", 0xB34B, fsub, FP128, FP128>;
+
+ def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load, 4>;
+ def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load, 8>;
+}
+
+// Multiplication.
+let isCommutable = 1 in {
+ def MEEBR : BinaryRRE<"meeb", 0xB317, fmul, FP32, FP32>;
+ def MDBR : BinaryRRE<"mdb", 0xB31C, fmul, FP64, FP64>;
+ def MXBR : BinaryRRE<"mxb", 0xB34C, fmul, FP128, FP128>;
+}
+def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load, 4>;
+def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load, 8>;
+
+// f64 multiplication of two FP32 registers.
+def MDEBR : BinaryRRE<"mdeb", 0xB30C, null_frag, FP64, FP32>;
+def : Pat<(fmul (f64 (fextend FP32:$src1)), (f64 (fextend FP32:$src2))),
+ (MDEBR (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ FP32:$src1, subreg_h32), FP32:$src2)>;
+
+// f64 multiplication of an FP32 register and an f32 memory.
+def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load, 4>;
+def : Pat<(fmul (f64 (fextend FP32:$src1)),
+ (f64 (extloadf32 bdxaddr12only:$addr))),
+ (MDEB (INSERT_SUBREG (f64 (IMPLICIT_DEF)), FP32:$src1, subreg_h32),
+ bdxaddr12only:$addr)>;
+
+// f128 multiplication of two FP64 registers.
+def MXDBR : BinaryRRE<"mxdb", 0xB307, null_frag, FP128, FP64>;
+def : Pat<(fmul (f128 (fextend FP64:$src1)), (f128 (fextend FP64:$src2))),
+ (MXDBR (INSERT_SUBREG (f128 (IMPLICIT_DEF)),
+ FP64:$src1, subreg_h64), FP64:$src2)>;
+
+// f128 multiplication of an FP64 register and an f64 memory.
+def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load, 8>;
+def : Pat<(fmul (f128 (fextend FP64:$src1)),
+ (f128 (extloadf64 bdxaddr12only:$addr))),
+ (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)), FP64:$src1, subreg_h64),
+ bdxaddr12only:$addr)>;
+
+// Fused multiply-add.
+def MAEBR : TernaryRRD<"maeb", 0xB30E, z_fma, FP32>;
+def MADBR : TernaryRRD<"madb", 0xB31E, z_fma, FP64>;
+
+def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load, 4>;
+def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load, 8>;
+
+// Fused multiply-subtract.
+def MSEBR : TernaryRRD<"mseb", 0xB30F, z_fms, FP32>;
+def MSDBR : TernaryRRD<"msdb", 0xB31F, z_fms, FP64>;
+
+def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load, 4>;
+def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load, 8>;
+
+// Division.
+def DEBR : BinaryRRE<"deb", 0xB30D, fdiv, FP32, FP32>;
+def DDBR : BinaryRRE<"ddb", 0xB31D, fdiv, FP64, FP64>;
+def DXBR : BinaryRRE<"dxb", 0xB34D, fdiv, FP128, FP128>;
+
+def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load, 4>;
+def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load, 8>;
+
+//===----------------------------------------------------------------------===//
+// Comparisons
+//===----------------------------------------------------------------------===//
+
+let Defs = [CC], CCValues = 0xF in {
+ def CEBR : CompareRRE<"ceb", 0xB309, z_fcmp, FP32, FP32>;
+ def CDBR : CompareRRE<"cdb", 0xB319, z_fcmp, FP64, FP64>;
+ def CXBR : CompareRRE<"cxb", 0xB349, z_fcmp, FP128, FP128>;
+
+ def CEB : CompareRXE<"ceb", 0xED09, z_fcmp, FP32, load, 4>;
+ def CDB : CompareRXE<"cdb", 0xED19, z_fcmp, FP64, load, 8>;
+}
+
+//===----------------------------------------------------------------------===//
+// Peepholes
+//===----------------------------------------------------------------------===//
+
+def : Pat<(f32 fpimmneg0), (LCEBR (LZER))>;
+def : Pat<(f64 fpimmneg0), (LCDBR (LZDR))>;
+def : Pat<(f128 fpimmneg0), (LCXBR (LZXR))>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
new file mode 100644
index 000000000000..a8efe165e36f
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -0,0 +1,1603 @@
+//==- SystemZInstrFormats.td - SystemZ Instruction Formats --*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Basic SystemZ instruction definition
+//===----------------------------------------------------------------------===//
+
+class InstSystemZ<int size, dag outs, dag ins, string asmstr,
+ list<dag> pattern> : Instruction {
+ let Namespace = "SystemZ";
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let Size = size;
+ let Pattern = pattern;
+ let AsmString = asmstr;
+
+ // Some instructions come in pairs, one having a 12-bit displacement
+ // and the other having a 20-bit displacement. Both instructions in
+ // the pair have the same DispKey and their DispSizes are "12" and "20"
+ // respectively.
+ string DispKey = "";
+ string DispSize = "none";
+
+ // Many register-based <INSN>R instructions have a memory-based <INSN>
+ // counterpart. OpKey uniquely identifies <INSN>, while OpType is
+ // "reg" for <INSN>R and "mem" for <INSN>.
+ string OpKey = "";
+ string OpType = "none";
+
+ // Many distinct-operands instructions have older 2-operand equivalents.
+ // NumOpsKey uniquely identifies one of these 2-operand and 3-operand pairs,
+ // with NumOpsValue being "2" or "3" as appropriate.
+ string NumOpsKey = "";
+ string NumOpsValue = "none";
+
+ // True if this instruction is a simple D(X,B) load of a register
+ // (with no sign or zero extension).
+ bit SimpleBDXLoad = 0;
+
+ // True if this instruction is a simple D(X,B) store of a register
+ // (with no truncation).
+ bit SimpleBDXStore = 0;
+
+ // True if this instruction has a 20-bit displacement field.
+ bit Has20BitOffset = 0;
+
+ // True if addresses in this instruction have an index register.
+ bit HasIndex = 0;
+
+ // True if this is a 128-bit pseudo instruction that combines two 64-bit
+ // operations.
+ bit Is128Bit = 0;
+
+ // The access size of all memory operands in bytes, or 0 if not known.
+ bits<5> AccessBytes = 0;
+
+ // If the instruction sets CC to a useful value, this gives the mask
+ // of all possible CC results. The mask has the same form as
+ // SystemZ::CCMASK_*.
+ bits<4> CCValues = 0;
+
+ // The subset of CCValues that have the same meaning as they would after
+ // a comparison of the first operand against zero.
+ bits<4> CompareZeroCCMask = 0;
+
+ // True if the instruction is conditional and if the CC mask operand
+ // comes first (as for BRC, etc.).
+ bit CCMaskFirst = 0;
+
+ // Similar, but true if the CC mask operand comes last (as for LOC, etc.).
+ bit CCMaskLast = 0;
+
+ // True if the instruction is the "logical" rather than "arithmetic" form,
+ // in cases where a distinction exists.
+ bit IsLogical = 0;
+
+ let TSFlags{0} = SimpleBDXLoad;
+ let TSFlags{1} = SimpleBDXStore;
+ let TSFlags{2} = Has20BitOffset;
+ let TSFlags{3} = HasIndex;
+ let TSFlags{4} = Is128Bit;
+ let TSFlags{9-5} = AccessBytes;
+ let TSFlags{13-10} = CCValues;
+ let TSFlags{17-14} = CompareZeroCCMask;
+ let TSFlags{18} = CCMaskFirst;
+ let TSFlags{19} = CCMaskLast;
+ let TSFlags{20} = IsLogical;
+}
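+
+// The C++ side reads these flags back through the SystemZII::* masks in
+// SystemZInstrInfo.h. A minimal sketch of such a query:
+//
+//   if (MI->getDesc().TSFlags & SystemZII::SimpleBDXLoad)
+//     ; // MI is a simple D(X,B) load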
+
+//===----------------------------------------------------------------------===//
+// Mappings between instructions
+//===----------------------------------------------------------------------===//
+
+// Return the version of an instruction that has an unsigned 12-bit
+// displacement.
+def getDisp12Opcode : InstrMapping {
+ let FilterClass = "InstSystemZ";
+ let RowFields = ["DispKey"];
+ let ColFields = ["DispSize"];
+ let KeyCol = ["20"];
+ let ValueCols = [["12"]];
+}
+
+// Return the version of an instruction that has a signed 20-bit displacement.
+def getDisp20Opcode : InstrMapping {
+ let FilterClass = "InstSystemZ";
+ let RowFields = ["DispKey"];
+ let ColFields = ["DispSize"];
+ let KeyCol = ["12"];
+ let ValueCols = [["20"]];
+}
+
+// Return the memory form of a register instruction.
+def getMemOpcode : InstrMapping {
+ let FilterClass = "InstSystemZ";
+ let RowFields = ["OpKey"];
+ let ColFields = ["OpType"];
+ let KeyCol = ["reg"];
+ let ValueCols = [["mem"]];
+}
+
+// Return the 3-operand form of a 2-operand instruction.
+def getThreeOperandOpcode : InstrMapping {
+ let FilterClass = "InstSystemZ";
+ let RowFields = ["NumOpsKey"];
+ let ColFields = ["NumOpsValue"];
+ let KeyCol = ["2"];
+ let ValueCols = [["3"]];
+}
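+
+// TableGen expands each InstrMapping into a lookup function in the SystemZ
+// namespace (emitted into SystemZGenInstrInfo.inc under GET_INSTRMAP_INFO).
+// A sketch of their use from C++, assuming the usual generated names:
+//
+//   int Disp20 = SystemZ::getDisp20Opcode(SystemZ::L);         // SystemZ::LY
+//   int ThreeOp = SystemZ::getThreeOperandOpcode(SystemZ::AR); // SystemZ::ARK
+//
+// Each function returns -1 if no counterpart exists.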
+
+//===----------------------------------------------------------------------===//
+// Instruction formats
+//===----------------------------------------------------------------------===//
+//
+// Formats are specified using operand field declarations of the form:
+//
+// bits<4> Rn : register input or output for operand n
+// bits<m> In : immediate value of width m for operand n
+// bits<4> BDn : address operand n, which has a base and a displacement
+// bits<m> XBDn : address operand n, which has an index, a base and a
+// displacement
+// bits<4> Xn : index register for address operand n
+// bits<4> Mn : mode value for operand n
+//
+// The operand numbers ("n" in the list above) follow the architecture manual.
+// Assembly operands sometimes have a different order; in particular, R3 is
+// often written between operands 1 and 2.
+//
+//===----------------------------------------------------------------------===//
+
+class InstRI<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<16> I2;
+
+ let Inst{31-24} = op{11-4};
+ let Inst{23-20} = R1;
+ let Inst{19-16} = op{3-0};
+ let Inst{15-0} = I2;
+}
+
+class InstRIEb<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+ bits<4> M3;
+ bits<16> RI4;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R2;
+ let Inst{31-16} = RI4;
+ let Inst{15-12} = M3;
+ let Inst{11-8} = 0;
+ let Inst{7-0} = op{7-0};
+}
+
+class InstRIEc<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<8> I2;
+ bits<4> M3;
+ bits<16> RI4;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = M3;
+ let Inst{31-16} = RI4;
+ let Inst{15-8} = I2;
+ let Inst{7-0} = op{7-0};
+}
+
+class InstRIEd<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R3;
+ bits<16> I2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R3;
+ let Inst{31-16} = I2;
+ let Inst{15-8} = 0;
+ let Inst{7-0} = op{7-0};
+}
+
+class InstRIEf<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+ bits<8> I3;
+ bits<8> I4;
+ bits<8> I5;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R2;
+ let Inst{31-24} = I3;
+ let Inst{23-16} = I4;
+ let Inst{15-8} = I5;
+ let Inst{7-0} = op{7-0};
+}
+
+class InstRIL<bits<12> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<32> I2;
+
+ let Inst{47-40} = op{11-4};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = op{3-0};
+ let Inst{31-0} = I2;
+}
+
+class InstRR<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<2, outs, ins, asmstr, pattern> {
+ field bits<16> Inst;
+ field bits<16> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+
+ let Inst{15-8} = op;
+ let Inst{7-4} = R1;
+ let Inst{3-0} = R2;
+}
+
+class InstRRD<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R3;
+ bits<4> R2;
+
+ let Inst{31-16} = op;
+ let Inst{15-12} = R1;
+ let Inst{11-8} = 0;
+ let Inst{7-4} = R3;
+ let Inst{3-0} = R2;
+}
+
+class InstRRE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+
+ let Inst{31-16} = op;
+ let Inst{15-8} = 0;
+ let Inst{7-4} = R1;
+ let Inst{3-0} = R2;
+}
+
+class InstRRF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R2;
+ bits<4> R3;
+ bits<4> R4;
+
+ let Inst{31-16} = op;
+ let Inst{15-12} = R3;
+ let Inst{11-8} = R4;
+ let Inst{7-4} = R1;
+ let Inst{3-0} = R2;
+}
+
+class InstRX<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<20> XBD2;
+
+ let Inst{31-24} = op;
+ let Inst{23-20} = R1;
+ let Inst{19-0} = XBD2;
+
+ let HasIndex = 1;
+}
+
+class InstRXE<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<20> XBD2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-16} = XBD2;
+ let Inst{15-8} = 0;
+ let Inst{7-0} = op{7-0};
+
+ let HasIndex = 1;
+}
+
+class InstRXF<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R3;
+ bits<20> XBD2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R3;
+ let Inst{35-16} = XBD2;
+ let Inst{15-12} = R1;
+ let Inst{11-8} = 0;
+ let Inst{7-0} = op{7-0};
+
+ let HasIndex = 1;
+}
+
+class InstRXY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<28> XBD2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-8} = XBD2;
+ let Inst{7-0} = op{7-0};
+
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+}
+
+class InstRS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R3;
+ bits<16> BD2;
+
+ let Inst{31-24} = op;
+ let Inst{23-20} = R1;
+ let Inst{19-16} = R3;
+ let Inst{15-0} = BD2;
+}
+
+class InstRSY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<4> R1;
+ bits<4> R3;
+ bits<24> BD2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-36} = R1;
+ let Inst{35-32} = R3;
+ let Inst{31-8} = BD2;
+ let Inst{7-0} = op{7-0};
+
+ let Has20BitOffset = 1;
+}
+
+class InstSI<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<4, outs, ins, asmstr, pattern> {
+ field bits<32> Inst;
+ field bits<32> SoftFail = 0;
+
+ bits<16> BD1;
+ bits<8> I2;
+
+ let Inst{31-24} = op;
+ let Inst{23-16} = I2;
+ let Inst{15-0} = BD1;
+}
+
+class InstSIL<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<16> BD1;
+ bits<16> I2;
+
+ let Inst{47-32} = op;
+ let Inst{31-16} = BD1;
+ let Inst{15-0} = I2;
+}
+
+class InstSIY<bits<16> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<24> BD1;
+ bits<8> I2;
+
+ let Inst{47-40} = op{15-8};
+ let Inst{39-32} = I2;
+ let Inst{31-8} = BD1;
+ let Inst{7-0} = op{7-0};
+
+ let Has20BitOffset = 1;
+}
+
+class InstSS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSystemZ<6, outs, ins, asmstr, pattern> {
+ field bits<48> Inst;
+ field bits<48> SoftFail = 0;
+
+ bits<24> BDL1;
+ bits<16> BD2;
+
+ let Inst{47-40} = op;
+ let Inst{39-16} = BDL1;
+ let Inst{15-0} = BD2;
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions with semantics
+//===----------------------------------------------------------------------===//
+//
+// These classes have the form [Cond]<Category><Format>, where <Format> is one
+// of the formats defined above and where <Category> describes the inputs
+// and outputs. "Cond" is used if the instruction is conditional,
+// in which case the 4-bit condition-code mask is added as a final operand.
+// <Category> can be one of:
+//
+// Inherent:
+// One register output operand and no input operands.
+//
+// BranchUnary:
+// One register output operand, one register input operand and
+// one branch displacement. The instruction stores a modified
+// form of the source register in the destination register and
+// branches on the result.
+//
+// Store:
+// One register or immediate input operand and one address input operand.
+// The instruction stores the first operand to the address.
+//
+// This category is used for both pure and truncating stores.
+//
+// LoadMultiple:
+// One address input operand and two explicit output operands.
+// The instruction loads a range of registers from the address,
+// with the explicit operands giving the first and last register
+// to load. Other loaded registers are added as implicit definitions.
+//
+// StoreMultiple:
+// Two explicit input register operands and an address operand.
+// The instruction stores a range of registers to the address,
+// with the explicit operands giving the first and last register
+// to store. Other stored registers are added as implicit uses.
+//
+// Unary:
+// One register output operand and one input operand. The input
+// operand may be a register, immediate or memory.
+//
+// Binary:
+// One register output operand and two input operands. The first
+// input operand is always a register and the second may be a register,
+// immediate or memory.
+//
+// Shift:
+// One register output operand and two input operands. The first
+// input operand is a register and the second has the same form as
+// an address (although it isn't actually used to address memory).
+//
+// Compare:
+// Two input operands. The first operand is always a register,
+// the second may be a register, immediate or memory.
+//
+// Ternary:
+// One register output operand and three register input operands.
+//
+// CmpSwap:
+// One output operand and three input operands. The first two
+// operands are registers and the third is an address. The instruction
+// both reads from and writes to the address.
+//
+// RotateSelect:
+// One output operand and five input operands. The first two operands
+// are registers and the other three are immediates.
+//
+// Prefetch:
+// One 4-bit immediate operand and one address operand. The immediate
+// operand is 1 for a load prefetch and 2 for a store prefetch.
+//
+// The format determines which input operands are tied to output operands,
+// and also determines the shape of any address operand.
+//
+// Multiclasses of the form <Category><Format>Pair define two instructions,
+// one with <Category><Format> and one with <Category><Format>Y. The name
+// of the first instruction has no suffix, the name of the second has
+// an extra "y".
+//
+//===----------------------------------------------------------------------===//
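+
+// For example (a sketch; the real defms live in SystemZInstrInfo.td), a
+// store pair is instantiated as:
+//
+//   defm ST : StoreRXPair<"st", 0x50, 0xE350, store, GR32, 4>;
+//
+// defining ST (RX, 12-bit displacement) and STY (RXY, 20-bit displacement)
+// as a DispKey pair.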
+
+class InherentRRE<string mnemonic, bits<16> opcode, RegisterOperand cls,
+ dag src>
+ : InstRRE<opcode, (outs cls:$R1), (ins),
+ mnemonic#"\t$R1",
+ [(set cls:$R1, src)]> {
+ let R2 = 0;
+}
+
+class BranchUnaryRI<string mnemonic, bits<12> opcode, RegisterOperand cls>
+ : InstRI<opcode, (outs cls:$R1), (ins cls:$R1src, brtarget16:$I2),
+ mnemonic##"\t$R1, $I2", []> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class LoadMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls>
+ : InstRSY<opcode, (outs cls:$R1, cls:$R3), (ins bdaddr20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
+ let mayLoad = 1;
+}
+
+class StoreRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRIL<opcode, (outs), (ins cls:$R1, pcrel32:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, pcrel32:$I2)]> {
+ let mayStore = 1;
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+class StoreRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr12only>
+ : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, mode:$XBD2)]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let mayStore = 1;
+ let AccessBytes = bytes;
+}
+
+class StoreRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, mode:$XBD2)]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let mayStore = 1;
+ let AccessBytes = bytes;
+}
+
+multiclass StoreRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
+ SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes,
+ bdxaddr20pair>;
+ }
+}
+
+class StoreMultipleRSY<string mnemonic, bits<16> opcode, RegisterOperand cls>
+ : InstRSY<opcode, (outs), (ins cls:$R1, cls:$R3, bdaddr20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2", []> {
+ let mayStore = 1;
+}
+
+// StoreSI* instructions are used to store an integer to memory, but the
+// addresses are more restricted than for normal stores. If we are in the
+// situation of having to force either the address into a register or the
+// constant into a register, it's usually better to do the latter.
+// We therefore match the address in the same way as a normal store and
+// only use the StoreSI* instruction if the matched address is suitable.
+class StoreSI<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ Immediate imm>
+ : InstSI<opcode, (outs), (ins mviaddr12pair:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator imm:$I2, mviaddr12pair:$BD1)]> {
+ let mayStore = 1;
+}
+
+class StoreSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ Immediate imm>
+ : InstSIY<opcode, (outs), (ins mviaddr20pair:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator imm:$I2, mviaddr20pair:$BD1)]> {
+ let mayStore = 1;
+}
+
+class StoreSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ Immediate imm>
+ : InstSIL<opcode, (outs), (ins mviaddr12pair:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator imm:$I2, mviaddr12pair:$BD1)]> {
+ let mayStore = 1;
+}
+
+multiclass StoreSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
+ SDPatternOperator operator, Immediate imm> {
+ let DispKey = mnemonic in {
+ let DispSize = "12" in
+ def "" : StoreSI<mnemonic, siOpcode, operator, imm>;
+ let DispSize = "20" in
+ def Y : StoreSIY<mnemonic#"y", siyOpcode, operator, imm>;
+ }
+}
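+
+// For example (a sketch of the corresponding defm in SystemZInstrInfo.td):
+//
+//   defm MVI : StoreSIPair<"mvi", 0x92, 0xEB52, truncstorei8, imm32zx8trunc>;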
+
+class CondStoreRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, cond4:$valid, cond4:$R3),
+ mnemonic#"$R3\t$R1, $BD2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayStore = 1;
+ let AccessBytes = bytes;
+ let CCMaskLast = 1;
+}
+
+// Like CondStoreRSY, but used for the raw assembly form. The condition-code
+// mask is the third operand rather than being part of the mnemonic.
+class AsmCondStoreRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, uimm8zx4:$R3),
+ mnemonic#"\t$R1, $BD2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayStore = 1;
+ let AccessBytes = bytes;
+}
+
+// Like CondStoreRSY, but with a fixed CC mask.
+class FixedCondStoreRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<4> ccmask, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2),
+ mnemonic#"\t$R1, $BD2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayStore = 1;
+ let AccessBytes = bytes;
+ let R3 = ccmask;
+}
+
+class UnaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRR<opcode, (outs cls1:$R1), (ins cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+}
+
+class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRE<opcode, (outs cls1:$R1), (ins cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+}
+
+class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2),
+ mnemonic#"r\t$R1, $R3, $R2", []> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let R4 = 0;
+}
+
+class UnaryRRF4<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2, uimm8zx4:$R4),
+ mnemonic#"\t$R1, $R3, $R2, $R4", []>;
+
+// These instructions are generated by if conversion. The old value of R1
+// is added as an implicit use.
+class CondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls2:$R2, cond4:$valid, cond4:$R3),
+ mnemonic#"r$R3\t$R1, $R2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let CCMaskLast = 1;
+ let R4 = 0;
+}
+
+// Like CondUnaryRRF, but used for the raw assembly form. The condition-code
+// mask is the third operand rather than being part of the mnemonic.
+class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, uimm8zx4:$R3),
+ mnemonic#"r\t$R1, $R2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R4 = 0;
+}
+
+// Like CondUnaryRRF, but with a fixed CC mask.
+class FixedCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2, bits<4> ccmask>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
+ mnemonic#"\t$R1, $R2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R3 = ccmask;
+ let R4 = 0;
+}
+
+class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRI<opcode, (outs cls:$R1), (ins imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator imm:$I2))]>;
+
+class UnaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRIL<opcode, (outs cls:$R1), (ins imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator imm:$I2))]>;
+
+class UnaryRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRIL<opcode, (outs cls:$R1), (ins pcrel32:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator pcrel32:$I2))]> {
+ let mayLoad = 1;
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+class CondUnaryRSY<string mnemonic, bits<16> opcode,
+ SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes, AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1),
+ (ins cls:$R1src, mode:$BD2, cond4:$valid, cond4:$R3),
+ mnemonic#"$R3\t$R1, $BD2",
+ [(set cls:$R1,
+ (z_select_ccmask (load mode:$BD2), cls:$R1src,
+ cond4:$valid, cond4:$R3))]>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+ let CCMaskLast = 1;
+}
+
+// Like CondUnaryRSY, but used for the raw assembly form. The condition-code
+// mask is the third operand rather than being part of the mnemonic.
+class AsmCondUnaryRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, uimm8zx4:$R3),
+ mnemonic#"\t$R1, $BD2, $R3", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+// Like CondUnaryRSY, but with a fixed CC mask.
+class FixedCondUnaryRSY<string mnemonic, bits<16> opcode,
+ RegisterOperand cls, bits<4> ccmask, bits<5> bytes,
+ AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2),
+ mnemonic#"\t$R1, $BD2", []>,
+ Requires<[FeatureLoadStoreOnCond]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let R3 = ccmask;
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class UnaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr12only>
+ : InstRX<opcode, (outs cls:$R1), (ins mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class UnaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes>
+ : InstRXE<opcode, (outs cls:$R1), (ins bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator bdxaddr12only:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class UnaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : InstRXY<opcode, (outs cls:$R1), (ins mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+multiclass UnaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
+ SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes,
+ bdxaddr20pair>;
+ }
+}
+
+class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRR<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRE<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRRF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R3, cls2:$R2),
+ mnemonic#"r\t$R1, $R3, $R2",
+ [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let R4 = 0;
+}
+
+class BinaryRRFK<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R2, cls2:$R3),
+ mnemonic#"rk\t$R1, $R2, $R3",
+ [(set cls1:$R1, (operator cls1:$R2, cls2:$R3))]> {
+ let R4 = 0;
+}
+
+multiclass BinaryRRAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls1,
+ RegisterOperand cls2> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRRFK<mnemonic, opcode2, null_frag, cls1, cls2>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRR<mnemonic, opcode1, operator, cls1, cls2>;
+ }
+}
+
+multiclass BinaryRREAndK<string mnemonic, bits<16> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls1,
+ RegisterOperand cls2> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRRFK<mnemonic, opcode2, null_frag, cls1, cls2>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRRE<mnemonic, opcode1, operator, cls1, cls2>;
+ }
+}
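+
+// As an illustration, 32-bit integer addition is defined roughly as:
+//
+//   defm AR : BinaryRRAndK<"a", 0x1A, 0xB9F8, add, GR32, GR32>;
+//
+// keeping AR as the 2-operand form and adding ARK as the 3-operand
+// distinct-operands form, linked through NumOpsKey.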
+
+class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRI<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRIE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRIEd<opcode, (outs cls:$R1), (ins cls:$R3, imm:$I2),
+ mnemonic#"\t$R1, $R3, $I2",
+ [(set cls:$R1, (operator cls:$R3, imm:$I2))]>;
+
+multiclass BinaryRIAndK<string mnemonic, bits<12> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRIE<mnemonic##"k", opcode2, null_frag, cls, imm>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRI<mnemonic, opcode1, operator, cls, imm>;
+ }
+}
+
+class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRIL<opcode, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
+ AddressingMode mode = bdxaddr12only>
+ : InstRX<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class BinaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
+ : InstRXE<opcode, (outs cls:$R1), (ins cls:$R1src, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src,
+ (load bdxaddr12only:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class BinaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : InstRXY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+multiclass BinaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
+ SDPatternOperator operator, RegisterOperand cls,
+ SDPatternOperator load, bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bytes,
+ bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load, bytes,
+ bdxaddr20pair>;
+ }
+}
+
+class BinarySI<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ Operand imm, AddressingMode mode = bdaddr12only>
+ : InstSI<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(store (operator (load mode:$BD1), imm:$I2), mode:$BD1)]> {
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+class BinarySIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ Operand imm, AddressingMode mode = bdaddr20only>
+ : InstSIY<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(store (operator (load mode:$BD1), imm:$I2), mode:$BD1)]> {
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+multiclass BinarySIPair<string mnemonic, bits<8> siOpcode,
+ bits<16> siyOpcode, SDPatternOperator operator,
+ Operand imm> {
+ let DispKey = mnemonic in {
+ let DispSize = "12" in
+ def "" : BinarySI<mnemonic, siOpcode, operator, imm, bdaddr12pair>;
+ let DispSize = "20" in
+ def Y : BinarySIY<mnemonic#"y", siyOpcode, operator, imm, bdaddr20pair>;
+ }
+}
+
+class ShiftRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2),
+ mnemonic#"\t$R1, $BD2",
+ [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> {
+ let R3 = 0;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class ShiftRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>;
+
+multiclass ShiftRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : ShiftRSY<mnemonic##"k", opcode2, null_frag, cls>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : ShiftRS<mnemonic, opcode1, operator, cls>;
+ }
+}
+
+class CompareRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRR<opcode, (outs), (ins cls1:$R1, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(operator cls1:$R1, cls2:$R2)]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let isCompare = 1;
+}
+
+class CompareRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : InstRRE<opcode, (outs), (ins cls1:$R1, cls2:$R2),
+ mnemonic#"r\t$R1, $R2",
+ [(operator cls1:$R1, cls2:$R2)]> {
+ let OpKey = mnemonic ## cls1;
+ let OpType = "reg";
+ let isCompare = 1;
+}
+
+class CompareRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRI<opcode, (outs), (ins cls:$R1, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, imm:$I2)]> {
+ let isCompare = 1;
+}
+
+class CompareRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm>
+ : InstRIL<opcode, (outs), (ins cls:$R1, imm:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, imm:$I2)]> {
+ let isCompare = 1;
+}
+
+class CompareRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load>
+ : InstRIL<opcode, (outs), (ins cls:$R1, pcrel32:$I2),
+ mnemonic#"\t$R1, $I2",
+ [(operator cls:$R1, (load pcrel32:$I2))]> {
+ let isCompare = 1;
+ let mayLoad = 1;
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+class CompareRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
+ AddressingMode mode = bdxaddr12only>
+ : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, (load mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let isCompare = 1;
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class CompareRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
+ : InstRXE<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, (load bdxaddr12only:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let isCompare = 1;
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class CompareRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2),
+ mnemonic#"\t$R1, $XBD2",
+ [(operator cls:$R1, (load mode:$XBD2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let isCompare = 1;
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+multiclass CompareRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
+ SDPatternOperator operator, RegisterOperand cls,
+ SDPatternOperator load, bits<5> bytes> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : CompareRX<mnemonic, rxOpcode, operator, cls,
+ load, bytes, bdxaddr12pair>;
+ let DispSize = "20" in
+ def Y : CompareRXY<mnemonic#"y", rxyOpcode, operator, cls,
+ load, bytes, bdxaddr20pair>;
+ }
+}
+
+class CompareSI<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ SDPatternOperator load, Immediate imm,
+ AddressingMode mode = bdaddr12only>
+ : InstSI<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator (load mode:$BD1), imm:$I2)]> {
+ let isCompare = 1;
+ let mayLoad = 1;
+}
+
+class CompareSIL<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ SDPatternOperator load, Immediate imm>
+ : InstSIL<opcode, (outs), (ins bdaddr12only:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator (load bdaddr12only:$BD1), imm:$I2)]> {
+ let isCompare = 1;
+ let mayLoad = 1;
+}
+
+class CompareSIY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ SDPatternOperator load, Immediate imm,
+ AddressingMode mode = bdaddr20only>
+ : InstSIY<opcode, (outs), (ins mode:$BD1, imm:$I2),
+ mnemonic#"\t$BD1, $I2",
+ [(operator (load mode:$BD1), imm:$I2)]> {
+ let isCompare = 1;
+ let mayLoad = 1;
+}
+
+multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
+ SDPatternOperator operator, SDPatternOperator load,
+ Immediate imm> {
+ let DispKey = mnemonic in {
+ let DispSize = "12" in
+ def "" : CompareSI<mnemonic, siOpcode, operator, load, imm, bdaddr12pair>;
+ let DispSize = "20" in
+ def Y : CompareSIY<mnemonic#"y", siyOpcode, operator, load, imm,
+ bdaddr20pair>;
+ }
+}
+
+class TernaryRRD<string mnemonic, bits<16> opcode,
+ SDPatternOperator operator, RegisterOperand cls>
+ : InstRRD<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, cls:$R2),
+ mnemonic#"r\t$R1, $R3, $R2",
+ [(set cls:$R1, (operator cls:$R1src, cls:$R3, cls:$R2))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "reg";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
+ : InstRXF<opcode, (outs cls:$R1),
+ (ins cls:$R1src, cls:$R3, bdxaddr12only:$XBD2),
+ mnemonic#"\t$R1, $R3, $XBD2",
+ [(set cls:$R1, (operator cls:$R1src, cls:$R3,
+ (load bdxaddr12only:$XBD2)))]> {
+ let OpKey = mnemonic ## cls;
+ let OpType = "mem";
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let AccessBytes = bytes;
+}
+
+class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls, AddressingMode mode = bdaddr12only>
+ : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator mode:$BD2, cls:$R1src, cls:$R3))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+class CmpSwapRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator mode:$BD2, cls:$R1src, cls:$R3))]> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
+multiclass CmpSwapRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode,
+ SDPatternOperator operator, RegisterOperand cls> {
+ let DispKey = mnemonic ## #cls in {
+ let DispSize = "12" in
+ def "" : CmpSwapRS<mnemonic, rsOpcode, operator, cls, bdaddr12pair>;
+ let DispSize = "20" in
+ def Y : CmpSwapRSY<mnemonic#"y", rsyOpcode, operator, cls, bdaddr20pair>;
+ }
+}
+
+class RotateSelectRIEf<string mnemonic, bits<16> opcode, RegisterOperand cls1,
+ RegisterOperand cls2>
+ : InstRIEf<opcode, (outs cls1:$R1),
+ (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ mnemonic#"\t$R1, $R2, $I3, $I4, $I5", []> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class PrefetchRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator>
+ : InstRXY<opcode, (outs), (ins uimm8zx4:$R1, bdxaddr20only:$XBD2),
+ mnemonic##"\t$R1, $XBD2",
+ [(operator uimm8zx4:$R1, bdxaddr20only:$XBD2)]>;
+
+class PrefetchRILPC<string mnemonic, bits<12> opcode,
+ SDPatternOperator operator>
+ : InstRIL<opcode, (outs), (ins uimm8zx4:$R1, pcrel32:$I2),
+ mnemonic##"\t$R1, $I2",
+ [(operator uimm8zx4:$R1, pcrel32:$I2)]> {
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+// A floating-point load-and-test operation. Create both a normal unary
+// operation and one that acts as a comparison against zero.
+multiclass LoadAndTestRRE<string mnemonic, bits<16> opcode,
+ RegisterOperand cls> {
+ def "" : UnaryRRE<mnemonic, opcode, null_frag, cls, cls>;
+ let isCodeGenOnly = 1 in
+ def Compare : CompareRRE<mnemonic, opcode, null_frag, cls, cls>;
+}
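+
+// For example, SystemZInstrFP.td uses this along the lines of:
+//
+//   defm LTEBR : LoadAndTestRRE<"lteb", 0xB302, FP32>;
+//
+// creating LTEBR plus a codegen-only LTEBRCompare for use as a comparison
+// against zero.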
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions
+//===----------------------------------------------------------------------===//
+//
+// Convenience instructions that get lowered to real instructions
+// by either SystemZTargetLowering::EmitInstrWithCustomInserter()
+// or SystemZInstrInfo::expandPostRAPseudo().
+//
+//===----------------------------------------------------------------------===//
+
+class Pseudo<dag outs, dag ins, list<dag> pattern>
+ : InstSystemZ<0, outs, ins, "", pattern> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+// Like UnaryRI, but expanded after RA depending on the choice of register.
+class UnaryRIPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs cls:$R1), (ins imm:$I2),
+ [(set cls:$R1, (operator imm:$I2))]>;
+
+// Like UnaryRXY, but expanded after RA depending on the choice of register.
+class UnaryRXYPseudo<string key, SDPatternOperator operator,
+ RegisterOperand cls, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : Pseudo<(outs cls:$R1), (ins mode:$XBD2),
+ [(set cls:$R1, (operator mode:$XBD2))]> {
+ let OpKey = key ## cls;
+ let OpType = "mem";
+ let mayLoad = 1;
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+ let AccessBytes = bytes;
+}
+
+// Like UnaryRR, but expanded after RA depending on the choice of registers.
+class UnaryRRPseudo<string key, SDPatternOperator operator,
+ RegisterOperand cls1, RegisterOperand cls2>
+ : Pseudo<(outs cls1:$R1), (ins cls2:$R2),
+ [(set cls1:$R1, (operator cls2:$R2))]> {
+ let OpKey = key ## cls1;
+ let OpType = "reg";
+}
+
+// Like BinaryRI, but expanded after RA depending on the choice of register.
+class BinaryRIPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+}
+
+// Like BinaryRIE, but expanded after RA depending on the choice of register.
+class BinaryRIEPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs cls:$R1), (ins cls:$R3, imm:$I2),
+ [(set cls:$R1, (operator cls:$R3, imm:$I2))]>;
+
+// Like BinaryRIAndK, but expanded after RA depending on the choice of register.
+multiclass BinaryRIAndKPseudo<string key, SDPatternOperator operator,
+ RegisterOperand cls, Immediate imm> {
+ let NumOpsKey = key in {
+ let NumOpsValue = "3" in
+ def K : BinaryRIEPseudo<null_frag, cls, imm>,
+ Requires<[FeatureHighWord, FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRIPseudo<operator, cls, imm>,
+ Requires<[FeatureHighWord]>;
+ }
+}
+
+// Like CompareRI, but expanded after RA depending on the choice of register.
+class CompareRIPseudo<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Pseudo<(outs), (ins cls:$R1, imm:$I2), [(operator cls:$R1, imm:$I2)]>;
+
+// Like CompareRXY, but expanded after RA depending on the choice of register.
+class CompareRXYPseudo<SDPatternOperator operator, RegisterOperand cls,
+ SDPatternOperator load, bits<5> bytes,
+ AddressingMode mode = bdxaddr20only>
+ : Pseudo<(outs), (ins cls:$R1, mode:$XBD2),
+ [(operator cls:$R1, (load mode:$XBD2))]> {
+ let mayLoad = 1;
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+ let AccessBytes = bytes;
+}
+
+// Like StoreRXY, but expanded after RA depending on the choice of register.
+class StoreRXYPseudo<SDPatternOperator operator, RegisterOperand cls,
+ bits<5> bytes, AddressingMode mode = bdxaddr20only>
+ : Pseudo<(outs), (ins cls:$R1, mode:$XBD2),
+ [(operator cls:$R1, mode:$XBD2)]> {
+ let mayStore = 1;
+ let Has20BitOffset = 1;
+ let HasIndex = 1;
+ let AccessBytes = bytes;
+}
+
+// Like RotateSelectRIEf, but expanded after RA depending on the choice
+// of registers.
+class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2>
+ : Pseudo<(outs cls1:$R1),
+ (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ []> {
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+// Implements "$dst = $cc & (8 >> CC) ? $src1 : $src2", where CC is
+// the value of the PSW's 2-bit condition code field.
+class SelectWrapper<RegisterOperand cls>
+ : Pseudo<(outs cls:$dst),
+ (ins cls:$src1, cls:$src2, uimm8zx4:$valid, uimm8zx4:$cc),
+ [(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2,
+ uimm8zx4:$valid, uimm8zx4:$cc))]> {
+ let usesCustomInserter = 1;
+ // Although the instructions used by these nodes do not in themselves
+ // change CC, the insertion requires new blocks, and CC cannot be live
+ // across them.
+ let Defs = [CC];
+ let Uses = [CC];
+}
+
+// Stores $new to $addr if $cc is true ("" case) or false (Inv case).
+multiclass CondStores<RegisterOperand cls, SDPatternOperator store,
+ SDPatternOperator load, AddressingMode mode> {
+ let Defs = [CC], Uses = [CC], usesCustomInserter = 1 in {
+ def "" : Pseudo<(outs),
+ (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ [(store (z_select_ccmask cls:$new, (load mode:$addr),
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr)]>;
+ def Inv : Pseudo<(outs),
+ (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ [(store (z_select_ccmask (load mode:$addr), cls:$new,
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr)]>;
+ }
+}
+
+// OPERATOR is ATOMIC_SWAP or an ATOMIC_LOAD_* operation. PAT and OPERAND
+// describe the second (non-memory) operand.
+class AtomicLoadBinary<SDPatternOperator operator, RegisterOperand cls,
+ dag pat, DAGOperand operand>
+ : Pseudo<(outs cls:$dst), (ins bdaddr20only:$ptr, operand:$src2),
+ [(set cls:$dst, (operator bdaddr20only:$ptr, pat))]> {
+ let Defs = [CC];
+ let Has20BitOffset = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+ let usesCustomInserter = 1;
+}
+
+// Specializations of AtomicLoadBinary.
+class AtomicLoadBinaryReg32<SDPatternOperator operator>
+ : AtomicLoadBinary<operator, GR32, (i32 GR32:$src2), GR32>;
+class AtomicLoadBinaryImm32<SDPatternOperator operator, Immediate imm>
+ : AtomicLoadBinary<operator, GR32, (i32 imm:$src2), imm>;
+class AtomicLoadBinaryReg64<SDPatternOperator operator>
+ : AtomicLoadBinary<operator, GR64, (i64 GR64:$src2), GR64>;
+class AtomicLoadBinaryImm64<SDPatternOperator operator, Immediate imm>
+ : AtomicLoadBinary<operator, GR64, (i64 imm:$src2), imm>;
+
+// OPERATOR is ATOMIC_SWAPW or an ATOMIC_LOADW_* operation. PAT and OPERAND
+// describe the second (non-memory) operand.
+class AtomicLoadWBinary<SDPatternOperator operator, dag pat,
+ DAGOperand operand>
+ : Pseudo<(outs GR32:$dst),
+ (ins bdaddr20only:$ptr, operand:$src2, ADDR32:$bitshift,
+ ADDR32:$negbitshift, uimm32:$bitsize),
+ [(set GR32:$dst, (operator bdaddr20only:$ptr, pat, ADDR32:$bitshift,
+ ADDR32:$negbitshift, uimm32:$bitsize))]> {
+ let Defs = [CC];
+ let Has20BitOffset = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+ let usesCustomInserter = 1;
+}
+
+// Specializations of AtomicLoadWBinary.
+class AtomicLoadWBinaryReg<SDPatternOperator operator>
+ : AtomicLoadWBinary<operator, (i32 GR32:$src2), GR32>;
+class AtomicLoadWBinaryImm<SDPatternOperator operator, Immediate imm>
+ : AtomicLoadWBinary<operator, (i32 imm:$src2), imm>;
+
+// Define an instruction that operates on two fixed-length blocks of memory,
+// and associated pseudo instructions for operating on blocks of any size.
+// The Sequence form uses a straight-line sequence of instructions and
+// the Loop form uses a loop of length-256 instructions followed by
+// another instruction to handle the excess.
+multiclass MemorySS<string mnemonic, bits<8> opcode,
+ SDPatternOperator sequence, SDPatternOperator loop> {
+ def "" : InstSS<opcode, (outs), (ins bdladdr12onlylen8:$BDL1,
+ bdaddr12only:$BD2),
+ mnemonic##"\t$BDL1, $BD2", []>;
+ let usesCustomInserter = 1 in {
+ def Sequence : Pseudo<(outs), (ins bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length),
+ [(sequence bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length)]>;
+ def Loop : Pseudo<(outs), (ins bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length, GR64:$count256),
+ [(loop bdaddr12only:$dest, bdaddr12only:$src,
+ imm64:$length, GR64:$count256)]>;
+ }
+}
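+
+// For example:
+//
+//   defm MVC : MemorySS<"mvc", 0xD2, z_mvc, z_mvc_loop>;
+//
+// defines MVC itself plus MVCSequence and MVCLoop pseudos for block moves
+// of arbitrary length.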
+
+// Define an instruction that operates on two strings, both terminated
+// by the character in R0. The instruction processes a CPU-determined
+// number of bytes at a time and sets CC to 3 if the instruction needs
+// to be repeated. Also define a pseudo instruction that represents
+// the full loop (the main instruction plus the branch on CC==3).
+multiclass StringRRE<string mnemonic, bits<16> opcode,
+ SDPatternOperator operator> {
+ def "" : InstRRE<opcode, (outs GR64:$R1, GR64:$R2),
+ (ins GR64:$R1src, GR64:$R2src),
+ mnemonic#"\t$R1, $R2", []> {
+ let Constraints = "$R1 = $R1src, $R2 = $R2src";
+ let DisableEncoding = "$R1src, $R2src";
+ }
+ let usesCustomInserter = 1 in
+ def Loop : Pseudo<(outs GR64:$end),
+ (ins GR64:$start1, GR64:$start2, GR32:$char),
+ [(set GR64:$end, (operator GR64:$start1, GR64:$start2,
+ GR32:$char))]>;
+}
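+
+// For example:
+//
+//   defm CLST : StringRRE<"clst", 0xB25D, z_strcmp>;
+//
+// defines CLST and a CLSTLoop pseudo covering the full CC==3 retry loop.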
+
+// A pseudo instruction that is a direct alias of a real instruction.
+// These aliases are used in cases where a particular register operand is
+// fixed or where the same instruction is used with different register sizes.
+// The size parameter is the size in bytes of the associated real instruction.
+class Alias<int size, dag outs, dag ins, list<dag> pattern>
+ : InstSystemZ<size, outs, ins, "", pattern> {
+ let isPseudo = 1;
+ let isCodeGenOnly = 1;
+}
+
+// An alias of a BinaryRI, but with different register sizes.
+class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Alias<4, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+}
+
+// An alias of a BinaryRIL, but with different register sizes.
+class BinaryAliasRIL<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Alias<6, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+ [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+ let Constraints = "$R1 = $R1src";
+}
+
+// An alias of a CompareRI, but with different register sizes.
+class CompareAliasRI<SDPatternOperator operator, RegisterOperand cls,
+ Immediate imm>
+ : Alias<4, (outs), (ins cls:$R1, imm:$I2), [(operator cls:$R1, imm:$I2)]> {
+ let isCompare = 1;
+}
+
+// An alias of a RotateSelectRIEf, but with different register sizes.
+class RotateSelectAliasRIEf<RegisterOperand cls1, RegisterOperand cls2>
+ : Alias<6, (outs cls1:$R1),
+ (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5), []> {
+ let Constraints = "$R1 = $R1src";
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
new file mode 100644
index 000000000000..acfeed80b54a
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -0,0 +1,1247 @@
+//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SystemZ implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZInstrInfo.h"
+#include "SystemZTargetMachine.h"
+#include "SystemZInstrBuilder.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#define GET_INSTRMAP_INFO
+#include "SystemZGenInstrInfo.inc"
+
+using namespace llvm;
+
+// Return a mask with Count low bits set.
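+// For example, allOnes(16) == 0xFFFF. The shift is split into two steps so
+// that allOnes(64) does not shift a 64-bit value by 64 bits, which would be
+// undefined behavior.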
+static uint64_t allOnes(unsigned int Count) {
+ return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
+}
+
+// Reg should be a 32-bit GPR. Return true if it is a high register rather
+// than a low register.
+static bool isHighReg(unsigned int Reg) {
+ if (SystemZ::GRH32BitRegClass.contains(Reg))
+ return true;
+ assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
+ return false;
+}
+
+// Pin the vtable to this file.
+void SystemZInstrInfo::anchor() {}
+
+SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
+ : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
+ RI(tm), TM(tm) {
+}
+
+// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
+// each having the opcode given by NewOpcode.
+void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
+ unsigned NewOpcode) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineFunction &MF = *MBB->getParent();
+
+ // Get two load or store instructions. Use the original instruction for one
+ // of them (arbitrarily the second here) and create a clone for the other.
+ MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
+ MBB->insert(MI, EarlierMI);
+
+ // Set up the two 64-bit registers.
+ MachineOperand &HighRegOp = EarlierMI->getOperand(0);
+ MachineOperand &LowRegOp = MI->getOperand(0);
+ HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
+ LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
+
+ // The address in the first (high) instruction is already correct.
+ // Adjust the offset in the second (low) instruction.
+ MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
+ MachineOperand &LowOffsetOp = MI->getOperand(2);
+ LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
+
+ // Set the opcodes.
+ unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
+ unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
+ assert(HighOpcode && LowOpcode && "Both offsets should be in range");
+
+ EarlierMI->setDesc(get(HighOpcode));
+ MI->setDesc(get(LowOpcode));
+}
+
+// Split ADJDYNALLOC instruction MI.
+void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineFunction &MF = *MBB->getParent();
+ MachineFrameInfo *MFFrame = MF.getFrameInfo();
+ MachineOperand &OffsetMO = MI->getOperand(2);
+
+ uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
+ SystemZMC::CallFrameSize +
+ OffsetMO.getImm());
+ unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
+ assert(NewOpcode && "No support for huge argument lists yet");
+ MI->setDesc(get(NewOpcode));
+ OffsetMO.setImm(Offset);
+}
+
+// MI is an RI-style pseudo instruction. Replace it with LowOpcode
+// if the first operand is a low GR32 and HighOpcode if the first operand
+// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
+// and HighOpcode takes an unsigned 32-bit operand. In those cases,
+// MI has the same kind of operand as LowOpcode, so needs to be converted
+// if HighOpcode is used.
+void SystemZInstrInfo::expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode,
+ bool ConvertHigh) const {
+ unsigned Reg = MI->getOperand(0).getReg();
+ bool IsHigh = isHighReg(Reg);
+ MI->setDesc(get(IsHigh ? HighOpcode : LowOpcode));
+ if (IsHigh && ConvertHigh)
+ MI->getOperand(1).setImm(uint32_t(MI->getOperand(1).getImm()));
+}
+
+// MI is a three-operand RIE-style pseudo instruction. Replace it with
+// LowOpcodeK if the registers are both low GR32s, otherwise use a move
+// followed by HighOpcode or LowOpcode, depending on whether the target
+// is a high or low GR32.
+void SystemZInstrInfo::expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned LowOpcodeK,
+ unsigned HighOpcode) const {
+ unsigned DestReg = MI->getOperand(0).getReg();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ bool DestIsHigh = isHighReg(DestReg);
+ bool SrcIsHigh = isHighReg(SrcReg);
+ if (!DestIsHigh && !SrcIsHigh)
+ MI->setDesc(get(LowOpcodeK));
+ else {
+ emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
+ DestReg, SrcReg, SystemZ::LR, 32,
+ MI->getOperand(1).isKill());
+ MI->setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
+ MI->getOperand(1).setReg(DestReg);
+ }
+}
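+
+// For example (an illustrative sketch): if the destination of an AHIMuxK is
+// a high register and the source a low one, the expansion is a RISBHL move
+// of the source into the destination followed by AIH on the destination.
+// Only when both registers are low can the distinct-operands form (AHIK)
+// be used directly.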
+
+// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
+// if the first operand is a low GR32 and HighOpcode if the first operand
+// is a high GR32.
+void SystemZInstrInfo::expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode) const {
+ unsigned Reg = MI->getOperand(0).getReg();
+ unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
+ MI->getOperand(2).getImm());
+ MI->setDesc(get(Opcode));
+}
+
+// MI is an RR-style pseudo instruction that zero-extends the low Size bits
+// of one GRX32 into another. Replace it with LowOpcode if both operands
+// are low registers, otherwise use RISB[LH]G.
+void SystemZInstrInfo::expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned Size) const {
+ emitGRX32Move(*MI->getParent(), MI, MI->getDebugLoc(),
+ MI->getOperand(0).getReg(), MI->getOperand(1).getReg(),
+ LowOpcode, Size, MI->getOperand(1).isKill());
+ MI->eraseFromParent();
+}
+
+// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
+// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
+// are low registers, otherwise use RISB[LH]G. Size is the number of bits
+// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
+// KillSrc is true if this move is the last use of SrcReg.
+void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg,
+ unsigned SrcReg, unsigned LowLowOpcode,
+ unsigned Size, bool KillSrc) const {
+ unsigned Opcode;
+ bool DestIsHigh = isHighReg(DestReg);
+ bool SrcIsHigh = isHighReg(SrcReg);
+ if (DestIsHigh && SrcIsHigh)
+ Opcode = SystemZ::RISBHH;
+ else if (DestIsHigh && !SrcIsHigh)
+ Opcode = SystemZ::RISBHL;
+ else if (!DestIsHigh && SrcIsHigh)
+ Opcode = SystemZ::RISBLH;
+ else {
+ BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
+ BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
+ .addReg(DestReg, RegState::Undef)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
+}
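+
+// To illustrate the immediates above (a sketch): zero-extending 8 bits from
+// a low source into a high destination (Size == 8) emits
+//   RISBHL %dest, %src, 24, 128 + 31, 32
+// i.e. rotate the source by 32 to line the two halves up, insert its bits
+// 24 to 31, and, because of the 128 (the "zero remaining bits" flag), clear
+// every other bit of the destination.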
+
+// If MI is a simple load or store for a frame object, return the register
+// it loads or stores and set FrameIndex to the index of the frame object.
+// Return 0 otherwise.
+//
+// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
+static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
+ unsigned Flag) {
+ const MCInstrDesc &MCID = MI->getDesc();
+ if ((MCID.TSFlags & Flag) &&
+ MI->getOperand(1).isFI() &&
+ MI->getOperand(2).getImm() == 0 &&
+ MI->getOperand(3).getReg() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ return 0;
+}
+
+unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
+}
+
+unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
+}
+
+bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
+ int &DestFrameIndex,
+ int &SrcFrameIndex) const {
+ // Check for MVC 0(Length,FI1),0(FI2)
+ const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
+ if (MI->getOpcode() != SystemZ::MVC ||
+ !MI->getOperand(0).isFI() ||
+ MI->getOperand(1).getImm() != 0 ||
+ !MI->getOperand(3).isFI() ||
+ MI->getOperand(4).getImm() != 0)
+ return false;
+
+ // Check that Length covers the full slots.
+ int64_t Length = MI->getOperand(2).getImm();
+ unsigned FI1 = MI->getOperand(0).getIndex();
+ unsigned FI2 = MI->getOperand(3).getIndex();
+ if (MFI->getObjectSize(FI1) != Length ||
+ MFI->getObjectSize(FI2) != Length)
+ return false;
+
+ DestFrameIndex = FI1;
+ SrcFrameIndex = FI2;
+ return true;
+}
+
+bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ // Most of the code and comments here are boilerplate.
+
+ // Start from the bottom of the block and work up, examining the
+ // terminator instructions.
+ MachineBasicBlock::iterator I = MBB.end();
+ while (I != MBB.begin()) {
+ --I;
+ if (I->isDebugValue())
+ continue;
+
+ // Working from the bottom, when we see a non-terminator instruction, we're
+ // done.
+ if (!isUnpredicatedTerminator(I))
+ break;
+
+ // A terminator that isn't a branch can't easily be handled by this
+ // analysis.
+ if (!I->isBranch())
+ return true;
+
+ // Can't handle indirect branches.
+ SystemZII::Branch Branch(getBranchInfo(I));
+ if (!Branch.Target->isMBB())
+ return true;
+
+ // Punt on compound branches.
+ if (Branch.Type != SystemZII::BranchNormal)
+ return true;
+
+ if (Branch.CCMask == SystemZ::CCMASK_ANY) {
+ // Handle unconditional branches.
+ if (!AllowModify) {
+ TBB = Branch.Target->getMBB();
+ continue;
+ }
+
+ // If the block has any instructions after a JMP, delete them.
+ while (llvm::next(I) != MBB.end())
+ llvm::next(I)->eraseFromParent();
+
+ Cond.clear();
+ FBB = 0;
+
+ // Delete the JMP if it's equivalent to a fall-through.
+ if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
+ TBB = 0;
+ I->eraseFromParent();
+ I = MBB.end();
+ continue;
+ }
+
+ // TBB is used to indicate the unconditional destination.
+ TBB = Branch.Target->getMBB();
+ continue;
+ }
+
+ // Working from the bottom, handle the first conditional branch.
+ if (Cond.empty()) {
+ // FIXME: add X86-style branch swap
+ FBB = TBB;
+ TBB = Branch.Target->getMBB();
+ Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
+ Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
+ continue;
+ }
+
+ // Handle subsequent conditional branches.
+ assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
+
+ // Only handle the case where all conditional branches branch to the same
+ // destination.
+ if (TBB != Branch.Target->getMBB())
+ return true;
+
+ // If the conditions are the same, we can leave them alone.
+ unsigned OldCCValid = Cond[0].getImm();
+ unsigned OldCCMask = Cond[1].getImm();
+ if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
+ continue;
+
+ // FIXME: Try combining conditions like X86 does. Should be easy on Z!
+ return false;
+ }
+
+ return false;
+}
+
+unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
+ // Most of the code and comments here are boilerplate.
+ MachineBasicBlock::iterator I = MBB.end();
+ unsigned Count = 0;
+
+ while (I != MBB.begin()) {
+ --I;
+ if (I->isDebugValue())
+ continue;
+ if (!I->isBranch())
+ break;
+ if (!getBranchInfo(I).Target->isMBB())
+ break;
+ // Remove the branch.
+ I->eraseFromParent();
+ I = MBB.end();
+ ++Count;
+ }
+
+ return Count;
+}
+
+bool SystemZInstrInfo::
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 2 && "Invalid condition");
+ Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
+ return false;
+}
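+
+// The XOR works because CCValid is the set of CC values that can occur and
+// CCMask the subset on which the branch is taken. For an integer compare,
+// for instance, CCValid == CCMASK_ICMP, and reversing an "equal" branch
+// gives CCMASK_CMP_EQ ^ CCMASK_ICMP == CCMASK_CMP_LT | CCMASK_CMP_GT,
+// which is exactly "not equal".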
+
+unsigned
+SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const {
+ // In this function we output 32-bit branches, which should always
+ // have enough range. They can be shortened and relaxed by later code
+ // in the pipeline, if desired.
+
+ // Shouldn't be a fall through.
+ assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 2 || Cond.size() == 0) &&
+ "SystemZ branch conditions have one component!");
+
+ if (Cond.empty()) {
+ // Unconditional branch?
+ assert(!FBB && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
+ return 1;
+ }
+
+ // Conditional branch.
+ unsigned Count = 0;
+ unsigned CCValid = Cond[0].getImm();
+ unsigned CCMask = Cond[1].getImm();
+ BuildMI(&MBB, DL, get(SystemZ::BRC))
+ .addImm(CCValid).addImm(CCMask).addMBB(TBB);
+ ++Count;
+
+ if (FBB) {
+ // Two-way conditional branch. Insert the second branch.
+ BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
+ ++Count;
+ }
+ return Count;
+}
+
+bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const {
+ assert(MI->isCompare() && "Caller should have checked for a comparison");
+
+ if (MI->getNumExplicitOperands() == 2 &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isImm()) {
+ SrcReg = MI->getOperand(0).getReg();
+ SrcReg2 = 0;
+ Value = MI->getOperand(1).getImm();
+ Mask = ~0;
+ return true;
+ }
+
+ return false;
+}
+
+// If Reg is a virtual register, return its definition, otherwise return null.
+static MachineInstr *getDef(unsigned Reg,
+ const MachineRegisterInfo *MRI) {
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ return 0;
+ return MRI->getUniqueVRegDef(Reg);
+}
+
+// Return true if MI is a shift of type Opcode by Imm bits.
+static bool isShift(MachineInstr *MI, int Opcode, int64_t Imm) {
+ return (MI->getOpcode() == Opcode &&
+ !MI->getOperand(2).getReg() &&
+ MI->getOperand(3).getImm() == Imm);
+}
+
+// If the destination of MI has no uses, delete it as dead.
+static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
+ if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
+ MI->eraseFromParent();
+}
+
+// Compare compares SrcReg against zero. Check whether SrcReg contains
+// the result of an IPM sequence whose input CC survives until Compare,
+// and whether Compare is therefore redundant. Delete it and return
+// true if so.
+static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
+ const MachineRegisterInfo *MRI,
+ const TargetRegisterInfo *TRI) {
+ MachineInstr *LGFR = 0;
+ MachineInstr *RLL = getDef(SrcReg, MRI);
+ if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
+ LGFR = RLL;
+ RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
+ }
+ if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
+ return false;
+
+ MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
+ if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
+ return false;
+
+ MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
+ if (!IPM || IPM->getOpcode() != SystemZ::IPM)
+ return false;
+
+ // Check that there are no assignments to CC between the IPM and Compare.
+ if (IPM->getParent() != Compare->getParent())
+ return false;
+ MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare;
+ for (++MBBI; MBBI != MBBE; ++MBBI) {
+ MachineInstr *MI = MBBI;
+ if (MI->modifiesRegister(SystemZ::CC, TRI))
+ return false;
+ }
+
+ Compare->eraseFromParent();
+ if (LGFR)
+ eraseIfDead(LGFR, MRI);
+ eraseIfDead(RLL, MRI);
+ eraseIfDead(SRL, MRI);
+ eraseIfDead(IPM, MRI);
+
+ return true;
+}
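+
+// The recognized pattern corresponds to a sequence such as
+//   IPM  %r1            ; CC placed in bits 29:28 (SystemZ::IPM_CC == 28)
+//   SRL  %r1, 28        ; CC moved down to bits 1:0
+//   RLL  %r1, %r1, 31   ; low CC bit rotated into the sign bit
+//   LGFR %r1, %r1       ; (optional) sign extension to 64 bits
+// after which comparing %r1 (signed) against zero gives no new information.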
+
+bool
+SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
+ unsigned SrcReg, unsigned SrcReg2,
+ int Mask, int Value,
+ const MachineRegisterInfo *MRI) const {
+ assert(!SrcReg2 && "Only optimizing constant comparisons so far");
+ bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
+ if (Value == 0 &&
+ !IsLogical &&
+ removeIPMBasedCompare(Compare, SrcReg, MRI, TM.getRegisterInfo()))
+ return true;
+ return false;
+}
+
+// If Opcode is a move that has a conditional variant, return that variant,
+// otherwise return 0.
+static unsigned getConditionalMove(unsigned Opcode) {
+ switch (Opcode) {
+ case SystemZ::LR: return SystemZ::LOCR;
+ case SystemZ::LGR: return SystemZ::LOCGR;
+ default: return 0;
+ }
+}
+
+bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
+ unsigned Opcode = MI->getOpcode();
+ if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
+ getConditionalMove(Opcode))
+ return true;
+ return false;
+}
+
+bool SystemZInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &MBB,
+ unsigned NumCycles, unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const {
+ // For now only convert single instructions.
+ return NumCycles == 1;
+}
+
+bool SystemZInstrInfo::
+isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumCyclesT, unsigned ExtraPredCyclesT,
+ MachineBasicBlock &FMBB,
+ unsigned NumCyclesF, unsigned ExtraPredCyclesF,
+ const BranchProbability &Probability) const {
+ // For now avoid converting mutually-exclusive cases.
+ return false;
+}
+
+bool SystemZInstrInfo::
+PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const {
+ assert(Pred.size() == 2 && "Invalid condition");
+ unsigned CCValid = Pred[0].getImm();
+ unsigned CCMask = Pred[1].getImm();
+ assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
+ unsigned Opcode = MI->getOpcode();
+ if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (unsigned CondOpcode = getConditionalMove(Opcode)) {
+ MI->setDesc(get(CondOpcode));
+ MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+ .addImm(CCValid).addImm(CCMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
+ return true;
+ }
+ }
+ return false;
+}
+
+void
+SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
+ if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
+ copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
+ RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
+ copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
+ RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
+ return;
+ }
+
+ if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
+ emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc);
+ return;
+ }
+
+ // Everything else needs only one instruction.
+ unsigned Opcode;
+ if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
+ Opcode = SystemZ::LGR;
+ else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
+ Opcode = SystemZ::LER;
+ else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
+ Opcode = SystemZ::LDR;
+ else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
+ Opcode = SystemZ::LXR;
+ else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+}
+
+void
+SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill,
+ int FrameIdx,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Callers may expect a single instruction, so keep 128-bit moves
+ // together for now and lower them after register allocation.
+ unsigned LoadOpcode, StoreOpcode;
+ getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
+ addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
+ .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
+}
+
+void
+SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+
+ // Callers may expect a single instruction, so keep 128-bit moves
+ // together for now and lower them after register allocation.
+ unsigned LoadOpcode, StoreOpcode;
+ getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
+ addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
+ FrameIdx);
+}
+
+// Return true if MI is a simple load or store with a 12-bit displacement
+// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
+static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
+ const MCInstrDesc &MCID = MI->getDesc();
+ return ((MCID.TSFlags & Flag) &&
+ isUInt<12>(MI->getOperand(2).getImm()) &&
+ MI->getOperand(3).getReg() == 0);
+}
+
+namespace {
+ struct LogicOp {
+ LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
+ LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
+ : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
+
+ operator bool() const { return RegSize; }
+
+ unsigned RegSize, ImmLSB, ImmSize;
+ };
+}
+
+static LogicOp interpretAndImmediate(unsigned Opcode) {
+ switch (Opcode) {
+ case SystemZ::NILMux: return LogicOp(32, 0, 16);
+ case SystemZ::NIHMux: return LogicOp(32, 16, 16);
+ case SystemZ::NILL64: return LogicOp(64, 0, 16);
+ case SystemZ::NILH64: return LogicOp(64, 16, 16);
+ case SystemZ::NIHL64: return LogicOp(64, 32, 16);
+ case SystemZ::NIHH64: return LogicOp(64, 48, 16);
+ case SystemZ::NIFMux: return LogicOp(32, 0, 32);
+ case SystemZ::NILF64: return LogicOp(64, 0, 32);
+ case SystemZ::NIHF64: return LogicOp(64, 32, 32);
+ default: return LogicOp();
+ }
+}
+
+// Used to return from convertToThreeAddress after replacing two-address
+// instruction OldMI with three-address instruction NewMI.
+static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
+ MachineInstr *NewMI,
+ LiveVariables *LV) {
+ if (LV) {
+ unsigned NumOps = OldMI->getNumOperands();
+ for (unsigned I = 1; I < NumOps; ++I) {
+ MachineOperand &Op = OldMI->getOperand(I);
+ if (Op.isReg() && Op.isKill())
+ LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
+ }
+ }
+ return NewMI;
+}
+
+MachineInstr *
+SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const {
+ MachineInstr *MI = MBBI;
+ MachineBasicBlock *MBB = MI->getParent();
+ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+
+ unsigned Opcode = MI->getOpcode();
+ unsigned NumOps = MI->getNumOperands();
+
+ // Try to convert something like SLL into SLLK, if supported.
+ // We prefer to keep the two-operand form where possible both
+ // because it tends to be shorter and because some instructions
+ // have memory forms that can be used during spilling.
+ if (TM.getSubtargetImpl()->hasDistinctOps()) {
+ MachineOperand &Dest = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+ unsigned DestReg = Dest.getReg();
+ unsigned SrcReg = Src.getReg();
+ // AHIMux is only really a three-operand instruction when both operands
+ // are low registers. Try to constrain both operands to be low if
+ // possible.
+ if (Opcode == SystemZ::AHIMux &&
+ TargetRegisterInfo::isVirtualRegister(DestReg) &&
+ TargetRegisterInfo::isVirtualRegister(SrcReg) &&
+ MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
+ MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
+ MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
+ MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
+ }
+ int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
+ if (ThreeOperandOpcode >= 0) {
+ MachineInstrBuilder MIB =
+ BuildMI(*MBB, MBBI, MI->getDebugLoc(), get(ThreeOperandOpcode))
+ .addOperand(Dest);
+ // Keep the kill state, but drop the tied flag.
+ MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
+ // Keep the remaining operands as-is.
+ for (unsigned I = 2; I < NumOps; ++I)
+ MIB.addOperand(MI->getOperand(I));
+ return finishConvertToThreeAddress(MI, MIB, LV);
+ }
+ }
+
+ // Try to convert an AND into an RISBG-type instruction.
+ if (LogicOp And = interpretAndImmediate(Opcode)) {
+ uint64_t Imm = MI->getOperand(2).getImm() << And.ImmLSB;
+ // AND IMMEDIATE leaves the other bits of the register unchanged.
+ Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
+ unsigned Start, End;
+ if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
+ unsigned NewOpcode;
+ if (And.RegSize == 64)
+ NewOpcode = SystemZ::RISBG;
+ else {
+ NewOpcode = SystemZ::RISBMux;
+ Start &= 31;
+ End &= 31;
+ }
+ MachineOperand &Dest = MI->getOperand(0);
+ MachineOperand &Src = MI->getOperand(1);
+ MachineInstrBuilder MIB =
+ BuildMI(*MBB, MI, MI->getDebugLoc(), get(NewOpcode))
+ .addOperand(Dest).addReg(0)
+ .addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg())
+ .addImm(Start).addImm(End + 128).addImm(0);
+ return finishConvertToThreeAddress(MI, MIB, LV);
+ }
+ }
+ return 0;
+}
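+
+// A worked example of the AND conversion (a sketch): "NILL %r1, 0xff00" on
+// a 64-bit register leaves the high 48 bits unchanged, so its effective
+// mask is 0xffffffffffffff00. isRxSBGMask maps that to Start == 0 and
+// End == 55, producing
+//   RISBG %dst, %r1, 0, 128 + 55, 0
+// where the 128 again sets the "zero remaining bits" flag.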
+
+MachineInstr *
+SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ unsigned Size = MFI->getObjectSize(FrameIndex);
+ unsigned Opcode = MI->getOpcode();
+
+ if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+ if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
+ isInt<8>(MI->getOperand(2).getImm()) &&
+ !MI->getOperand(3).getReg()) {
+ // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
+ return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
+ .addFrameIndex(FrameIndex).addImm(0)
+ .addImm(MI->getOperand(2).getImm());
+ }
+ return 0;
+ }
+
+ // All other cases require a single operand.
+ if (Ops.size() != 1)
+ return 0;
+
+ unsigned OpNum = Ops[0];
+ assert(Size == MF.getRegInfo()
+ .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
+ "Invalid size combination");
+
+ if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) &&
+ OpNum == 0 &&
+ isInt<8>(MI->getOperand(2).getImm())) {
+ // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
+ Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
+ return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
+ .addFrameIndex(FrameIndex).addImm(0)
+ .addImm(MI->getOperand(2).getImm());
+ }
+
+ if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
+ bool Op0IsGPR = (Opcode == SystemZ::LGDR);
+ bool Op1IsGPR = (Opcode == SystemZ::LDGR);
+ // If we're spilling the destination of an LDGR or LGDR, store the
+ // source register instead.
+ if (OpNum == 0) {
+ unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
+ return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
+ .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
+ .addImm(0).addReg(0);
+ }
+ // If we're spilling the source of an LDGR or LGDR, load the
+ // destination register instead.
+ if (OpNum == 1) {
+ unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
+ unsigned Dest = MI->getOperand(0).getReg();
+ return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
+ .addFrameIndex(FrameIndex).addImm(0).addReg(0);
+ }
+ }
+
+ // Look for cases where the source of a simple store or the destination
+ // of a simple load is being spilled. Try to use MVC instead.
+ //
+ // Although MVC is in practice a fast choice in these cases, it is still
+ // logically a bytewise copy. This means that we cannot use it if the
+ // load or store is volatile. We also wouldn't be able to use MVC if
+ // the two memories partially overlap, but that case cannot occur here,
+ // because we know that one of the memories is a full frame index.
+ //
+ // For performance reasons, we also want to avoid using MVC if the addresses
+ // might be equal. We don't worry about that case here, because spill slot
+ // coloring happens later, and because we have special code to remove
+ // MVCs that turn out to be redundant.
+ if (OpNum == 0 && MI->hasOneMemOperand()) {
+ MachineMemOperand *MMO = *MI->memoperands_begin();
+ if (MMO->getSize() == Size && !MMO->isVolatile()) {
+ // Handle conversion of loads.
+ if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
+ return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
+ .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
+ .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
+ .addMemOperand(MMO);
+ }
+ // Handle conversion of stores.
+ if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
+ return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
+ .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
+ .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
+ .addMemOperand(MMO);
+ }
+ }
+ }
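+
+ // For example (a sketch): if the 4-byte result of "L %r2, 0(%r3)" is
+ // being spilled, the load folds into
+ //   MVC <spill slot>(4), 0(%r3)
+ // which copies the four bytes straight into the slot.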
+
+ // If the spilled operand is the final one, try to change <INSN>R
+ // into <INSN>.
+ int MemOpcode = SystemZ::getMemOpcode(Opcode);
+ if (MemOpcode >= 0) {
+ unsigned NumOps = MI->getNumExplicitOperands();
+ if (OpNum == NumOps - 1) {
+ const MCInstrDesc &MemDesc = get(MemOpcode);
+ uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
+ assert(AccessBytes != 0 && "Size of access should be known");
+ assert(AccessBytes <= Size && "Access outside the frame index");
+ uint64_t Offset = Size - AccessBytes;
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
+ for (unsigned I = 0; I < OpNum; ++I)
+ MIB.addOperand(MI->getOperand(I));
+ MIB.addFrameIndex(FrameIndex).addImm(Offset);
+ if (MemDesc.TSFlags & SystemZII::HasIndex)
+ MIB.addReg(0);
+ return MIB;
+ }
+ }
+
+ return 0;
+}
+
+MachineInstr *
+SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const {
+ return 0;
+}
+
+bool
+SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ switch (MI->getOpcode()) {
+ case SystemZ::L128:
+ splitMove(MI, SystemZ::LG);
+ return true;
+
+ case SystemZ::ST128:
+ splitMove(MI, SystemZ::STG);
+ return true;
+
+ case SystemZ::LX:
+ splitMove(MI, SystemZ::LD);
+ return true;
+
+ case SystemZ::STX:
+ splitMove(MI, SystemZ::STD);
+ return true;
+
+ case SystemZ::LBMux:
+ expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
+ return true;
+
+ case SystemZ::LHMux:
+ expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
+ return true;
+
+ case SystemZ::LLCRMux:
+ expandZExtPseudo(MI, SystemZ::LLCR, 8);
+ return true;
+
+ case SystemZ::LLHRMux:
+ expandZExtPseudo(MI, SystemZ::LLHR, 16);
+ return true;
+
+ case SystemZ::LLCMux:
+ expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
+ return true;
+
+ case SystemZ::LLHMux:
+ expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
+ return true;
+
+ case SystemZ::LMux:
+ expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
+ return true;
+
+ case SystemZ::STCMux:
+ expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
+ return true;
+
+ case SystemZ::STHMux:
+ expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
+ return true;
+
+ case SystemZ::STMux:
+ expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
+ return true;
+
+ case SystemZ::LHIMux:
+ expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
+ return true;
+
+ case SystemZ::IIFMux:
+ expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
+ return true;
+
+ case SystemZ::IILMux:
+ expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
+ return true;
+
+ case SystemZ::IIHMux:
+ expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
+ return true;
+
+ case SystemZ::NIFMux:
+ expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
+ return true;
+
+ case SystemZ::NILMux:
+ expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
+ return true;
+
+ case SystemZ::NIHMux:
+ expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
+ return true;
+
+ case SystemZ::OIFMux:
+ expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
+ return true;
+
+ case SystemZ::OILMux:
+ expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
+ return true;
+
+ case SystemZ::OIHMux:
+ expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
+ return true;
+
+ case SystemZ::XIFMux:
+ expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
+ return true;
+
+ case SystemZ::TMLMux:
+ expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
+ return true;
+
+ case SystemZ::TMHMux:
+ expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
+ return true;
+
+ case SystemZ::AHIMux:
+ expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
+ return true;
+
+ case SystemZ::AHIMuxK:
+ expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
+ return true;
+
+ case SystemZ::AFIMux:
+ expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
+ return true;
+
+ case SystemZ::CFIMux:
+ expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
+ return true;
+
+ case SystemZ::CLFIMux:
+ expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
+ return true;
+
+ case SystemZ::CMux:
+ expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
+ return true;
+
+ case SystemZ::CLMux:
+ expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
+ return true;
+
+ case SystemZ::RISBMux: {
+ bool DestIsHigh = isHighReg(MI->getOperand(0).getReg());
+ bool SrcIsHigh = isHighReg(MI->getOperand(2).getReg());
+ if (SrcIsHigh == DestIsHigh)
+ MI->setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
+ else {
+ MI->setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
+ MI->getOperand(5).setImm(MI->getOperand(5).getImm() ^ 32);
+ }
+ return true;
+ }
+
+ case SystemZ::ADJDYNALLOC:
+ splitAdjDynAlloc(MI);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
+ if (MI->getOpcode() == TargetOpcode::INLINEASM) {
+ const MachineFunction *MF = MI->getParent()->getParent();
+ const char *AsmStr = MI->getOperand(0).getSymbolName();
+ return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+ }
+ return MI->getDesc().getSize();
+}
+
+SystemZII::Branch
+SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
+ switch (MI->getOpcode()) {
+ case SystemZ::BR:
+ case SystemZ::J:
+ case SystemZ::JG:
+ return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
+ SystemZ::CCMASK_ANY, &MI->getOperand(0));
+
+ case SystemZ::BRC:
+ case SystemZ::BRCL:
+ return SystemZII::Branch(SystemZII::BranchNormal,
+ MI->getOperand(0).getImm(),
+ MI->getOperand(1).getImm(), &MI->getOperand(2));
+
+ case SystemZ::BRCT:
+ return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
+ SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
+
+ case SystemZ::BRCTG:
+ return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
+ SystemZ::CCMASK_CMP_NE, &MI->getOperand(2));
+
+ case SystemZ::CIJ:
+ case SystemZ::CRJ:
+ return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ case SystemZ::CLIJ:
+ case SystemZ::CLRJ:
+ return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ case SystemZ::CGIJ:
+ case SystemZ::CGRJ:
+ return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ case SystemZ::CLGIJ:
+ case SystemZ::CLGRJ:
+ return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
+ MI->getOperand(2).getImm(), &MI->getOperand(3));
+
+ default:
+ llvm_unreachable("Unrecognized branch opcode");
+ }
+}
+
+void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
+ unsigned &LoadOpcode,
+ unsigned &StoreOpcode) const {
+ if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
+ LoadOpcode = SystemZ::L;
+ StoreOpcode = SystemZ::ST;
+ } else if (RC == &SystemZ::GRH32BitRegClass) {
+ LoadOpcode = SystemZ::LFH;
+ StoreOpcode = SystemZ::STFH;
+ } else if (RC == &SystemZ::GRX32BitRegClass) {
+ LoadOpcode = SystemZ::LMux;
+ StoreOpcode = SystemZ::STMux;
+ } else if (RC == &SystemZ::GR64BitRegClass ||
+ RC == &SystemZ::ADDR64BitRegClass) {
+ LoadOpcode = SystemZ::LG;
+ StoreOpcode = SystemZ::STG;
+ } else if (RC == &SystemZ::GR128BitRegClass ||
+ RC == &SystemZ::ADDR128BitRegClass) {
+ LoadOpcode = SystemZ::L128;
+ StoreOpcode = SystemZ::ST128;
+ } else if (RC == &SystemZ::FP32BitRegClass) {
+ LoadOpcode = SystemZ::LE;
+ StoreOpcode = SystemZ::STE;
+ } else if (RC == &SystemZ::FP64BitRegClass) {
+ LoadOpcode = SystemZ::LD;
+ StoreOpcode = SystemZ::STD;
+ } else if (RC == &SystemZ::FP128BitRegClass) {
+ LoadOpcode = SystemZ::LX;
+ StoreOpcode = SystemZ::STX;
+ } else
+ llvm_unreachable("Unsupported regclass to load or store");
+}
+
+unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
+ int64_t Offset) const {
+ const MCInstrDesc &MCID = get(Opcode);
+ int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
+ if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
+ // Get the instruction to use for unsigned 12-bit displacements.
+ int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
+ if (Disp12Opcode >= 0)
+ return Disp12Opcode;
+
+ // All address-related instructions can use unsigned 12-bit
+ // displacements.
+ return Opcode;
+ }
+ if (isInt<20>(Offset) && isInt<20>(Offset2)) {
+ // Get the instruction to use for signed 20-bit displacements.
+ int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
+ if (Disp20Opcode >= 0)
+ return Disp20Opcode;
+
+ // Check whether Opcode allows signed 20-bit displacements.
+ if (MCID.TSFlags & SystemZII::Has20BitOffset)
+ return Opcode;
+ }
+ return 0;
+}
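+
+// For example (assuming the usual pairing of L with its 20-bit-displacement
+// sibling LY): an offset of 4000 fits the unsigned 12-bit field, so L is
+// returned unchanged; an offset of 100000 needs the signed 20-bit form and
+// returns LY; anything outside the 20-bit range returns 0, meaning no
+// single instruction can encode it.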
+
+unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
+ switch (Opcode) {
+ case SystemZ::L: return SystemZ::LT;
+ case SystemZ::LY: return SystemZ::LT;
+ case SystemZ::LG: return SystemZ::LTG;
+ case SystemZ::LGF: return SystemZ::LTGF;
+ case SystemZ::LR: return SystemZ::LTR;
+ case SystemZ::LGFR: return SystemZ::LTGFR;
+ case SystemZ::LGR: return SystemZ::LTGR;
+ case SystemZ::LER: return SystemZ::LTEBR;
+ case SystemZ::LDR: return SystemZ::LTDBR;
+ case SystemZ::LXR: return SystemZ::LTXBR;
+ default: return 0;
+ }
+}
+
+// Return true if Mask matches the regexp 0*1+0*, given that zero masks
+// have already been filtered out. Store the first set bit in LSB and
+// the number of set bits in Length if so.
+static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
+ unsigned First = findFirstSet(Mask);
+ uint64_t Top = (Mask >> First) + 1;
+ if ((Top & -Top) == Top) {
+ LSB = First;
+ Length = findFirstSet(Top);
+ return true;
+ }
+ return false;
+}
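+
+// Example: Mask == 0x00ff00 gives First == 8 and Top == 0x100. Top is a
+// power of two exactly when the shifted mask was a contiguous run of ones,
+// and findFirstSet(Top) then recovers the length of the run (8 here).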
+
+bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
+ unsigned &Start, unsigned &End) const {
+ // Reject trivial all-zero masks.
+ if (Mask == 0)
+ return false;
+
+ // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
+ // the msb and End specifies the index of the lsb.
+ unsigned LSB, Length;
+ if (isStringOfOnes(Mask, LSB, Length)) {
+ Start = 63 - (LSB + Length - 1);
+ End = 63 - LSB;
+ return true;
+ }
+
+ // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
+ // of the low 1s and End specifies the lsb of the high 1s.
+ if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
+ assert(LSB > 0 && "Bottom bit must be set");
+ assert(LSB + Length < BitSize && "Top bit must be set");
+ Start = 63 - (LSB - 1);
+ End = 63 - (LSB + Length);
+ return true;
+ }
+
+ return false;
+}
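+
+// Two examples of the numbering (RISBG counts bit 0 as the msb of the
+// 64-bit register): the contiguous mask 0x00ff00 yields Start == 48 and
+// End == 55, while the wrap-around 32-bit mask 0xf000000f yields
+// Start == 60 and End == 35, with Start > End signalling the wrap.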
+
+unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
+ const MachineInstr *MI) const {
+ switch (Opcode) {
+ case SystemZ::CR:
+ return SystemZ::CRJ;
+ case SystemZ::CGR:
+ return SystemZ::CGRJ;
+ case SystemZ::CHI:
+ return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
+ case SystemZ::CGHI:
+ return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
+ case SystemZ::CLR:
+ return SystemZ::CLRJ;
+ case SystemZ::CLGR:
+ return SystemZ::CLGRJ;
+ case SystemZ::CLFI:
+ return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLIJ : 0;
+ case SystemZ::CLGFI:
+ return MI && isUInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CLGIJ : 0;
+ default:
+ return 0;
+ }
+}
+
+void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned Reg, uint64_t Value) const {
+ DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
+ unsigned Opcode;
+ if (isInt<16>(Value))
+ Opcode = SystemZ::LGHI;
+ else if (SystemZ::isImmLL(Value))
+ Opcode = SystemZ::LLILL;
+ else if (SystemZ::isImmLH(Value)) {
+ Opcode = SystemZ::LLILH;
+ Value >>= 16;
+ } else {
+ assert(isInt<32>(Value) && "Huge values not handled yet");
+ Opcode = SystemZ::LGFI;
+ }
+ BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
+}
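+
+// For example, Value == 0x12340000 fails the isInt<16> and isImmLL tests
+// but passes isImmLH, so the emitted instruction is "LLILH %reg, 0x1234"
+// (the LLILH immediate holds bits 16 to 31 of the value).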
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
new file mode 100644
index 000000000000..be4c8fe2add2
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -0,0 +1,257 @@
+//===-- SystemZInstrInfo.h - SystemZ instruction information ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SystemZ implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_SYSTEMZINSTRINFO_H
+#define LLVM_TARGET_SYSTEMZINSTRINFO_H
+
+#include "SystemZ.h"
+#include "SystemZRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "SystemZGenInstrInfo.inc"
+
+namespace llvm {
+
+class SystemZTargetMachine;
+
+namespace SystemZII {
+ enum {
+ // See comments in SystemZInstrFormats.td.
+ SimpleBDXLoad = (1 << 0),
+ SimpleBDXStore = (1 << 1),
+ Has20BitOffset = (1 << 2),
+ HasIndex = (1 << 3),
+ Is128Bit = (1 << 4),
+ AccessSizeMask = (31 << 5),
+ AccessSizeShift = 5,
+ CCValuesMask = (15 << 10),
+ CCValuesShift = 10,
+ CompareZeroCCMaskMask = (15 << 14),
+ CompareZeroCCMaskShift = 14,
+ CCMaskFirst = (1 << 18),
+ CCMaskLast = (1 << 19),
+ IsLogical = (1 << 20)
+ };
+ static inline unsigned getAccessSize(unsigned int Flags) {
+ return (Flags & AccessSizeMask) >> AccessSizeShift;
+ }
+ static inline unsigned getCCValues(unsigned int Flags) {
+ return (Flags & CCValuesMask) >> CCValuesShift;
+ }
+ static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
+ return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
+ }
+
+ // SystemZ MachineOperand target flags.
+ enum {
+ // Masks out the bits for the access model.
+ MO_SYMBOL_MODIFIER = (1 << 0),
+
+ // @GOT (aka @GOTENT)
+ MO_GOT = (1 << 0)
+ };
+ // Classifies a branch.
+ enum BranchType {
+ // An instruction that branches on the current value of CC.
+ BranchNormal,
+
+ // An instruction that performs a 32-bit signed comparison and branches
+ // on the result.
+ BranchC,
+
+ // An instruction that performs a 32-bit unsigned comparison and branches
+ // on the result.
+ BranchCL,
+
+ // An instruction that performs a 64-bit signed comparison and branches
+ // on the result.
+ BranchCG,
+
+ // An instruction that performs a 64-bit unsigned comparison and branches
+ // on the result.
+ BranchCLG,
+
+ // An instruction that decrements a 32-bit register and branches if
+ // the result is nonzero.
+ BranchCT,
+
+ // An instruction that decrements a 64-bit register and branches if
+ // the result is nonzero.
+ BranchCTG
+ };
+ // Information about a branch instruction.
+ struct Branch {
+ // The type of the branch.
+ BranchType Type;
+
+ // CCMASK_<N> is set if CC might be equal to N.
+ unsigned CCValid;
+
+ // CCMASK_<N> is set if the branch should be taken when CC == N.
+ unsigned CCMask;
+
+ // The target of the branch.
+ const MachineOperand *Target;
+
+ Branch(BranchType type, unsigned ccValid, unsigned ccMask,
+ const MachineOperand *target)
+ : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
+ };
+}
+
+class SystemZInstrInfo : public SystemZGenInstrInfo {
+ const SystemZRegisterInfo RI;
+ SystemZTargetMachine &TM;
+
+ void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
+ void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
+ void expandRIPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode, bool ConvertHigh) const;
+ void expandRIEPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned LowOpcodeK, unsigned HighOpcode) const;
+ void expandRXYPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned HighOpcode) const;
+ void expandZExtPseudo(MachineInstr *MI, unsigned LowOpcode,
+ unsigned Size) const;
+ void emitGRX32Move(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ unsigned LowLowOpcode, unsigned Size, bool KillSrc) const;
+ virtual void anchor();
+
+public:
+ explicit SystemZInstrInfo(SystemZTargetMachine &TM);
+
+ // Override TargetInstrInfo.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const LLVM_OVERRIDE;
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const LLVM_OVERRIDE;
+ virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ int &SrcFrameIndex) const LLVM_OVERRIDE;
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const LLVM_OVERRIDE;
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const LLVM_OVERRIDE;
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const LLVM_OVERRIDE;
+ bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+ unsigned &SrcReg2, int &Mask, int &Value) const
+ LLVM_OVERRIDE;
+ bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+ unsigned SrcReg2, int Mask, int Value,
+ const MachineRegisterInfo *MRI) const LLVM_OVERRIDE;
+ virtual bool isPredicable(MachineInstr *MI) const LLVM_OVERRIDE;
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const
+ LLVM_OVERRIDE;
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumCyclesT,
+ unsigned ExtraPredCyclesT,
+ MachineBasicBlock &FMBB,
+ unsigned NumCyclesF,
+ unsigned ExtraPredCyclesF,
+ const BranchProbability &Probability) const
+ LLVM_OVERRIDE;
+ virtual bool
+ PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const
+ LLVM_OVERRIDE;
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const LLVM_OVERRIDE;
+ virtual void
+ storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
+ virtual void
+ loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
+ virtual MachineInstr *
+ convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const;
+ virtual MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const;
+ virtual MachineInstr *
+ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const;
+ virtual bool
+ expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const LLVM_OVERRIDE;
+ virtual bool
+ ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
+ LLVM_OVERRIDE;
+
+ // Return the SystemZRegisterInfo, which this class owns.
+ const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
+
+ // Return the size in bytes of MI.
+ uint64_t getInstSizeInBytes(const MachineInstr *MI) const;
+
+ // MI must be a conditional or unconditional branch. Return a description
+ // of the branch: its type, the mask of condition-code values on which it
+ // is taken, and the operand that contains the branch target. This target
+ // can be a register or a basic block.
+ SystemZII::Branch getBranchInfo(const MachineInstr *MI) const;
+
+ // Get the load and store opcodes for a given register class.
+ void getLoadStoreOpcodes(const TargetRegisterClass *RC,
+ unsigned &LoadOpcode, unsigned &StoreOpcode) const;
+
+ // Opcode is the opcode of an instruction that has an address operand,
+ // and the caller wants to perform that instruction's operation on an
+ // address that has displacement Offset. Return the opcode of a suitable
+ // instruction (which might be Opcode itself) or 0 if no such instruction
+ // exists.
+ unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const;
+
+ // If Opcode is a load instruction that has a LOAD AND TEST form,
+ // return the opcode for the testing form, otherwise return 0.
+ unsigned getLoadAndTest(unsigned Opcode) const;
+
+ // Return true if ROTATE AND ... SELECTED BITS can be used to select the
+ // bits of the R2 operand indicated by Mask, where only the low BitSize
+ // bits of Mask are significant. Set Start and End to the I3 and I4
+ // operands if so.
+ bool isRxSBGMask(uint64_t Mask, unsigned BitSize,
+ unsigned &Start, unsigned &End) const;
+
+ // If Opcode is a COMPARE opcode for which an associated COMPARE AND
+ // BRANCH exists, return the opcode for the latter, otherwise return 0.
+ // MI, if nonnull, is the compare instruction.
+ unsigned getCompareAndBranch(unsigned Opcode,
+ const MachineInstr *MI = 0) const;
+
+ // Emit code before MBBI in MBB to move immediate value Value into
+ // physical register Reg.
+ void loadImmediate(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned Reg, uint64_t Value) const;
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
new file mode 100644
index 000000000000..6524e442b63d
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -0,0 +1,1393 @@
+//===-- SystemZInstrInfo.td - General SystemZ instructions ----*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Stack allocation
+//===----------------------------------------------------------------------===//
+
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt),
+ [(callseq_start timm:$amt)]>;
+def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
+let neverHasSideEffects = 1 in {
+ // Takes as input the value of the stack pointer after a dynamic allocation
+ // has been made. Sets the output to the address of the dynamically-
+ // allocated area itself, skipping the outgoing arguments.
+ //
+ // This expands to an LA or LAY instruction. We restrict the offset
+ // to the range of LA and keep the LAY range in reserve for when
+ // the size of the outgoing arguments is added.
+ def ADJDYNALLOC : Pseudo<(outs GR64:$dst), (ins dynalloc12only:$src),
+ [(set GR64:$dst, dynalloc12only:$src)]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Control flow instructions
+//===----------------------------------------------------------------------===//
+
+// A return instruction (br %r14).
+let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
+ def Return : Alias<2, (outs), (ins), [(z_retflag)]>;
+
+// Unconditional branches. R1 is the condition-code mask (all 1s).
+let isBranch = 1, isTerminator = 1, isBarrier = 1, R1 = 15 in {
+ let isIndirectBranch = 1 in
+ def BR : InstRR<0x07, (outs), (ins ADDR64:$R2),
+ "br\t$R2", [(brind ADDR64:$R2)]>;
+
+ // An assembler extended mnemonic for BRC.
+ def J : InstRI<0xA74, (outs), (ins brtarget16:$I2), "j\t$I2",
+ [(br bb:$I2)]>;
+
+ // An assembler extended mnemonic for BRCL. (The extension is "G"
+ // rather than "L" because "JL" is "Jump if Less".)
+ def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2), "jg\t$I2", []>;
+}
+
+// Conditional branches. It's easier for LLVM to handle these branches
+// in their raw BRC/BRCL form, with the 4-bit condition-code mask being
+// the first operand. It seems friendlier to use mnemonic forms like
+// JE and JLH when writing out the assembly though.
+let isBranch = 1, isTerminator = 1, Uses = [CC] in {
+ let isCodeGenOnly = 1, CCMaskFirst = 1 in {
+ def BRC : InstRI<0xA74, (outs), (ins cond4:$valid, cond4:$R1,
+ brtarget16:$I2), "j$R1\t$I2",
+ [(z_br_ccmask cond4:$valid, cond4:$R1, bb:$I2)]>;
+ def BRCL : InstRIL<0xC04, (outs), (ins cond4:$valid, cond4:$R1,
+ brtarget32:$I2), "jg$R1\t$I2", []>;
+ }
+ def AsmBRC : InstRI<0xA74, (outs), (ins uimm8zx4:$R1, brtarget16:$I2),
+ "brc\t$R1, $I2", []>;
+ def AsmBRCL : InstRIL<0xC04, (outs), (ins uimm8zx4:$R1, brtarget32:$I2),
+ "brcl\t$R1, $I2", []>;
+ def AsmBCR : InstRR<0x07, (outs), (ins uimm8zx4:$R1, GR64:$R2),
+ "bcr\t$R1, $R2", []>;
+}
+
+// Fused compare-and-branch instructions. As for normal branches,
+// we handle these instructions internally in their raw CRJ-like form,
+// but use assembly macros like CRJE when writing them out.
+//
+// These instructions do not use or clobber the condition codes.
+// We nevertheless pretend that they clobber CC, so that we can lower
+// them to separate comparisons and BRCLs if the branch ends up being
+// out of range.
+multiclass CompareBranches<Operand ccmask, string pos1, string pos2> {
+ let isBranch = 1, isTerminator = 1, Defs = [CC] in {
+ def RJ : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "crj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def GRJ : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "cgrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def IJ : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "cij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ def GIJ : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "cgij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ def LRJ : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def LGRJ : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clgrj"##pos1##"\t$R1, $R2, "##pos2##"$RI4", []>;
+ def LIJ : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ def LGIJ : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2, ccmask:$M3,
+ brtarget16:$RI4),
+ "clgij"##pos1##"\t$R1, $I2, "##pos2##"$RI4", []>;
+ }
+}
+let isCodeGenOnly = 1 in
+ defm C : CompareBranches<cond4, "$M3", "">;
+defm AsmC : CompareBranches<uimm8zx4, "", "$M3, ">;
+
+// Define AsmParser mnemonics for each general condition-code mask
+// (integer or floating-point)
+multiclass CondExtendedMnemonic<bits<4> ccmask, string name> {
+ let R1 = ccmask in {
+ def J : InstRI<0xA74, (outs), (ins brtarget16:$I2),
+ "j"##name##"\t$I2", []>;
+ def JG : InstRIL<0xC04, (outs), (ins brtarget32:$I2),
+ "jg"##name##"\t$I2", []>;
+ def BR : InstRR<0x07, (outs), (ins ADDR64:$R2), "b"##name##"r\t$R2", []>;
+ }
+ def LOCR : FixedCondUnaryRRF<"locr"##name, 0xB9F2, GR32, GR32, ccmask>;
+ def LOCGR : FixedCondUnaryRRF<"locgr"##name, 0xB9E2, GR64, GR64, ccmask>;
+ def LOC : FixedCondUnaryRSY<"loc"##name, 0xEBF2, GR32, ccmask, 4>;
+ def LOCG : FixedCondUnaryRSY<"locg"##name, 0xEBE2, GR64, ccmask, 8>;
+ def STOC : FixedCondStoreRSY<"stoc"##name, 0xEBF3, GR32, ccmask, 4>;
+ def STOCG : FixedCondStoreRSY<"stocg"##name, 0xEBE3, GR64, ccmask, 8>;
+}
+defm AsmO : CondExtendedMnemonic<1, "o">;
+defm AsmH : CondExtendedMnemonic<2, "h">;
+defm AsmNLE : CondExtendedMnemonic<3, "nle">;
+defm AsmL : CondExtendedMnemonic<4, "l">;
+defm AsmNHE : CondExtendedMnemonic<5, "nhe">;
+defm AsmLH : CondExtendedMnemonic<6, "lh">;
+defm AsmNE : CondExtendedMnemonic<7, "ne">;
+defm AsmE : CondExtendedMnemonic<8, "e">;
+defm AsmNLH : CondExtendedMnemonic<9, "nlh">;
+defm AsmHE : CondExtendedMnemonic<10, "he">;
+defm AsmNL : CondExtendedMnemonic<11, "nl">;
+defm AsmLE : CondExtendedMnemonic<12, "le">;
+defm AsmNH : CondExtendedMnemonic<13, "nh">;
+defm AsmNO : CondExtendedMnemonic<14, "no">;
+
+// Define AsmParser mnemonics for each integer condition-code mask.
+// This is like the list above, except that condition 3 is not possible
+// and that the low bit of the mask is therefore always 0. This means
+// that each condition has two names. Conditions "o" and "no" are not used.
+//
+// We don't make one of the two names an alias of the other because
+// we need the custom parsing routines to select the correct register class.
+multiclass IntCondExtendedMnemonicA<bits<4> ccmask, string name> {
+ let M3 = ccmask in {
+ def CR : InstRIEb<0xEC76, (outs), (ins GR32:$R1, GR32:$R2,
+ brtarget16:$RI4),
+ "crj"##name##"\t$R1, $R2, $RI4", []>;
+ def CGR : InstRIEb<0xEC64, (outs), (ins GR64:$R1, GR64:$R2,
+ brtarget16:$RI4),
+ "cgrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CI : InstRIEc<0xEC7E, (outs), (ins GR32:$R1, imm32sx8:$I2,
+ brtarget16:$RI4),
+ "cij"##name##"\t$R1, $I2, $RI4", []>;
+ def CGI : InstRIEc<0xEC7C, (outs), (ins GR64:$R1, imm64sx8:$I2,
+ brtarget16:$RI4),
+ "cgij"##name##"\t$R1, $I2, $RI4", []>;
+ def CLR : InstRIEb<0xEC77, (outs), (ins GR32:$R1, GR32:$R2,
+ brtarget16:$RI4),
+ "clrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CLGR : InstRIEb<0xEC65, (outs), (ins GR64:$R1, GR64:$R2,
+ brtarget16:$RI4),
+ "clgrj"##name##"\t$R1, $R2, $RI4", []>;
+ def CLI : InstRIEc<0xEC7F, (outs), (ins GR32:$R1, imm32zx8:$I2,
+ brtarget16:$RI4),
+ "clij"##name##"\t$R1, $I2, $RI4", []>;
+ def CLGI : InstRIEc<0xEC7D, (outs), (ins GR64:$R1, imm64zx8:$I2,
+ brtarget16:$RI4),
+ "clgij"##name##"\t$R1, $I2, $RI4", []>;
+ }
+}
+multiclass IntCondExtendedMnemonic<bits<4> ccmask, string name1, string name2>
+ : IntCondExtendedMnemonicA<ccmask, name1> {
+ let isAsmParserOnly = 1 in
+ defm Alt : IntCondExtendedMnemonicA<ccmask, name2>;
+}
+defm AsmJH : IntCondExtendedMnemonic<2, "h", "nle">;
+defm AsmJL : IntCondExtendedMnemonic<4, "l", "nhe">;
+defm AsmJLH : IntCondExtendedMnemonic<6, "lh", "ne">;
+defm AsmJE : IntCondExtendedMnemonic<8, "e", "nlh">;
+defm AsmJHE : IntCondExtendedMnemonic<10, "he", "nl">;
+defm AsmJLE : IntCondExtendedMnemonic<12, "le", "nh">;
+
+// Decrement a register and branch if it is nonzero. These don't clobber CC,
+// but we might need to split long branches into sequences that do.
+let Defs = [CC] in {
+ def BRCT : BranchUnaryRI<"brct", 0xA76, GR32>;
+ def BRCTG : BranchUnaryRI<"brctg", 0xA77, GR64>;
+}
+
+//===----------------------------------------------------------------------===//
+// Select instructions
+//===----------------------------------------------------------------------===//
+
+def Select32Mux : SelectWrapper<GRX32>, Requires<[FeatureHighWord]>;
+def Select32 : SelectWrapper<GR32>;
+def Select64 : SelectWrapper<GR64>;
+
+// We don't define 32-bit Mux stores because the low-only STOC should
+// always be used if possible.
+defm CondStore8Mux : CondStores<GRX32, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>,
+ Requires<[FeatureHighWord]>;
+defm CondStore16Mux : CondStores<GRX32, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>,
+ Requires<[FeatureHighWord]>;
+defm CondStore8 : CondStores<GR32, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>;
+defm CondStore16 : CondStores<GR32, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>;
+defm CondStore32 : CondStores<GR32, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
+defm : CondStores64<CondStore8, CondStore8Inv, nonvolatile_truncstorei8,
+ nonvolatile_anyextloadi8, bdxaddr20only>;
+defm : CondStores64<CondStore16, CondStore16Inv, nonvolatile_truncstorei16,
+ nonvolatile_anyextloadi16, bdxaddr20only>;
+defm : CondStores64<CondStore32, CondStore32Inv, nonvolatile_truncstorei32,
+ nonvolatile_anyextloadi32, bdxaddr20only>;
+defm CondStore64 : CondStores<GR64, nonvolatile_store,
+ nonvolatile_load, bdxaddr20only>;
+
+//===----------------------------------------------------------------------===//
+// Call instructions
+//===----------------------------------------------------------------------===//
+
+// The definitions here are for the call-clobbered registers.
+let isCall = 1, Defs = [R0D, R1D, R2D, R3D, R4D, R5D, R14D,
+ F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D, CC] in {
+ def CallBRASL : Alias<6, (outs), (ins pcrel32:$I2, variable_ops),
+ [(z_call pcrel32:$I2)]>;
+ def CallBASR : Alias<2, (outs), (ins ADDR64:$R2, variable_ops),
+ [(z_call ADDR64:$R2)]>;
+}
+
+// Sibling calls. Indirect sibling calls must be via R1, since R2 upwards
+// are argument registers and since branching to R0 is a no-op.
+let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in {
+ def CallJG : Alias<6, (outs), (ins pcrel32:$I2),
+ [(z_sibcall pcrel32:$I2)]>;
+ let Uses = [R1D] in
+ def CallBR : Alias<2, (outs), (ins), [(z_sibcall R1D)]>;
+}
+
+// Define the general form of the call instructions for the asm parser.
+// These instructions don't hard-code %r14 as the return address register.
+def BRAS : InstRI<0xA75, (outs), (ins GR64:$R1, brtarget16:$I2),
+ "bras\t$R1, $I2", []>;
+def BRASL : InstRIL<0xC05, (outs), (ins GR64:$R1, brtarget32:$I2),
+ "brasl\t$R1, $I2", []>;
+def BASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
+ "basr\t$R1, $R2", []>;
+
+//===----------------------------------------------------------------------===//
+// Move instructions
+//===----------------------------------------------------------------------===//
+
+// Register moves.
+let neverHasSideEffects = 1 in {
+ // Expands to LR, RISBHG or RISBLG, depending on the choice of registers.
+ def LRMux : UnaryRRPseudo<"l", null_frag, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+ def LR : UnaryRR <"l", 0x18, null_frag, GR32, GR32>;
+ def LGR : UnaryRRE<"lg", 0xB904, null_frag, GR64, GR64>;
+}
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ def LTR : UnaryRR <"lt", 0x12, null_frag, GR32, GR32>;
+ def LTGR : UnaryRRE<"ltg", 0xB902, null_frag, GR64, GR64>;
+}
+
+// Move on condition.
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def LOCR : CondUnaryRRF<"loc", 0xB9F2, GR32, GR32>;
+ def LOCGR : CondUnaryRRF<"locg", 0xB9E2, GR64, GR64>;
+}
+let Uses = [CC] in {
+ def AsmLOCR : AsmCondUnaryRRF<"loc", 0xB9F2, GR32, GR32>;
+ def AsmLOCGR : AsmCondUnaryRRF<"locg", 0xB9E2, GR64, GR64>;
+}
+
+// Immediate moves.
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+ isReMaterializable = 1 in {
+ // 16-bit sign-extended immediates. LHIMux expands to LHI or IIHF,
+  // depending on the choice of register.
+ def LHIMux : UnaryRIPseudo<bitconvert, GRX32, imm32sx16>,
+ Requires<[FeatureHighWord]>;
+ def LHI : UnaryRI<"lhi", 0xA78, bitconvert, GR32, imm32sx16>;
+ def LGHI : UnaryRI<"lghi", 0xA79, bitconvert, GR64, imm64sx16>;
+
+ // Other 16-bit immediates.
+ def LLILL : UnaryRI<"llill", 0xA5F, bitconvert, GR64, imm64ll16>;
+ def LLILH : UnaryRI<"llilh", 0xA5E, bitconvert, GR64, imm64lh16>;
+ def LLIHL : UnaryRI<"llihl", 0xA5D, bitconvert, GR64, imm64hl16>;
+ def LLIHH : UnaryRI<"llihh", 0xA5C, bitconvert, GR64, imm64hh16>;
+
+ // 32-bit immediates.
+ def LGFI : UnaryRIL<"lgfi", 0xC01, bitconvert, GR64, imm64sx32>;
+ def LLILF : UnaryRIL<"llilf", 0xC0F, bitconvert, GR64, imm64lf32>;
+ def LLIHF : UnaryRIL<"llihf", 0xC0E, bitconvert, GR64, imm64hf32>;
+}
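+
+// As an illustration of the fields involved: "llilh %r1, 0x1234" sets %r1
+// to 0x0000000012340000, while "llihf %r1, 0x12345678" sets it to
+// 0x1234567800000000; each LLI* form loads its immediate into one field
+// and zeroes the remaining bits.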
+
+// Register loads.
+let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
+ // Expands to L, LY or LFH, depending on the choice of register.
+ def LMux : UnaryRXYPseudo<"l", load, GRX32, 4>,
+ Requires<[FeatureHighWord]>;
+ defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32, 4>;
+ def LFH : UnaryRXY<"lfh", 0xE3CA, load, GRH32, 4>,
+ Requires<[FeatureHighWord]>;
+ def LG : UnaryRXY<"lg", 0xE304, load, GR64, 8>;
+
+ // These instructions are split after register allocation, so we don't
+ // want a custom inserter.
+ let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
+ def L128 : Pseudo<(outs GR128:$dst), (ins bdxaddr20only128:$src),
+ [(set GR128:$dst, (load bdxaddr20only128:$src))]>;
+ }
+}
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ def LT : UnaryRXY<"lt", 0xE312, load, GR32, 4>;
+ def LTG : UnaryRXY<"ltg", 0xE302, load, GR64, 8>;
+}
+
+let canFoldAsLoad = 1 in {
+ def LRL : UnaryRILPC<"lrl", 0xC4D, aligned_load, GR32>;
+ def LGRL : UnaryRILPC<"lgrl", 0xC48, aligned_load, GR64>;
+}
+
+// Load on condition.
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def LOC : CondUnaryRSY<"loc", 0xEBF2, nonvolatile_load, GR32, 4>;
+ def LOCG : CondUnaryRSY<"locg", 0xEBE2, nonvolatile_load, GR64, 8>;
+}
+let Uses = [CC] in {
+ def AsmLOC : AsmCondUnaryRSY<"loc", 0xEBF2, GR32, 4>;
+ def AsmLOCG : AsmCondUnaryRSY<"locg", 0xEBE2, GR64, 8>;
+}
+
+// Register stores.
+let SimpleBDXStore = 1 in {
+ // Expands to ST, STY or STFH, depending on the choice of register.
+ def STMux : StoreRXYPseudo<store, GRX32, 4>,
+ Requires<[FeatureHighWord]>;
+ defm ST : StoreRXPair<"st", 0x50, 0xE350, store, GR32, 4>;
+ def STFH : StoreRXY<"stfh", 0xE3CB, store, GRH32, 4>,
+ Requires<[FeatureHighWord]>;
+ def STG : StoreRXY<"stg", 0xE324, store, GR64, 8>;
+
+ // These instructions are split after register allocation, so we don't
+ // want a custom inserter.
+ let Has20BitOffset = 1, HasIndex = 1, Is128Bit = 1 in {
+ def ST128 : Pseudo<(outs), (ins GR128:$src, bdxaddr20only128:$dst),
+ [(store GR128:$src, bdxaddr20only128:$dst)]>;
+ }
+}
+def STRL : StoreRILPC<"strl", 0xC4F, aligned_store, GR32>;
+def STGRL : StoreRILPC<"stgrl", 0xC4B, aligned_store, GR64>;
+
+// Store on condition.
+let isCodeGenOnly = 1, Uses = [CC] in {
+ def STOC : CondStoreRSY<"stoc", 0xEBF3, GR32, 4>;
+ def STOCG : CondStoreRSY<"stocg", 0xEBE3, GR64, 8>;
+}
+let Uses = [CC] in {
+ def AsmSTOC : AsmCondStoreRSY<"stoc", 0xEBF3, GR32, 4>;
+ def AsmSTOCG : AsmCondStoreRSY<"stocg", 0xEBE3, GR64, 8>;
+}
+
+// 8-bit immediate stores to 8-bit fields.
+defm MVI : StoreSIPair<"mvi", 0x92, 0xEB52, truncstorei8, imm32zx8trunc>;
+
+// 16-bit immediate stores to 16-, 32- or 64-bit fields.
+def MVHHI : StoreSIL<"mvhhi", 0xE544, truncstorei16, imm32sx16trunc>;
+def MVHI : StoreSIL<"mvhi", 0xE54C, store, imm32sx16>;
+def MVGHI : StoreSIL<"mvghi", 0xE548, store, imm64sx16>;
+
+// Memory-to-memory moves.
+let mayLoad = 1, mayStore = 1 in
+ defm MVC : MemorySS<"mvc", 0xD2, z_mvc, z_mvc_loop>;
+
+// String moves.
+let mayLoad = 1, mayStore = 1, Defs = [CC], Uses = [R0L] in
+ defm MVST : StringRRE<"mvst", 0xB255, z_stpcpy>;
+
+//===----------------------------------------------------------------------===//
+// Sign extensions
+//===----------------------------------------------------------------------===//
+//
+// Note that putting these before zero extensions means that we will prefer
+// them for anyextload*. There's not really much to choose between the two
+// either way, but sign-extending loads have a short LH and a long LHY,
+// while zero-extending loads have only the long LLH.
+//
+//===----------------------------------------------------------------------===//
+
+// 32-bit extensions from registers.
+let neverHasSideEffects = 1 in {
+ def LBR : UnaryRRE<"lb", 0xB926, sext8, GR32, GR32>;
+ def LHR : UnaryRRE<"lh", 0xB927, sext16, GR32, GR32>;
+}
+
+// 64-bit extensions from registers.
+let neverHasSideEffects = 1 in {
+ def LGBR : UnaryRRE<"lgb", 0xB906, sext8, GR64, GR64>;
+ def LGHR : UnaryRRE<"lgh", 0xB907, sext16, GR64, GR64>;
+ def LGFR : UnaryRRE<"lgf", 0xB914, sext32, GR64, GR32>;
+}
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LTGFR : UnaryRRE<"ltgf", 0xB912, null_frag, GR64, GR64>;
+
+// Match 32-to-64-bit sign extensions in which the source is already
+// in a 64-bit register.
+def : Pat<(sext_inreg GR64:$src, i32),
+ (LGFR (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
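+
+// In C terms the pattern above is "(int64_t)(int32_t)x" applied to a value
+// that is already in a 64-bit register, so no separate truncation is needed.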
+
+// 32-bit extensions from 8-bit memory. LBMux expands to LB or LBH,
+// depending on the choice of register.
+def LBMux : UnaryRXYPseudo<"lb", asextloadi8, GRX32, 1>,
+ Requires<[FeatureHighWord]>;
+def LB : UnaryRXY<"lb", 0xE376, asextloadi8, GR32, 1>;
+def LBH : UnaryRXY<"lbh", 0xE3C0, asextloadi8, GRH32, 1>,
+ Requires<[FeatureHighWord]>;
+
+// 32-bit extensions from 16-bit memory. LHMux expands to LH or LHH,
+// depending on the choice of register.
+def LHMux : UnaryRXYPseudo<"lh", asextloadi16, GRX32, 2>,
+ Requires<[FeatureHighWord]>;
+defm LH : UnaryRXPair<"lh", 0x48, 0xE378, asextloadi16, GR32, 2>;
+def LHH : UnaryRXY<"lhh", 0xE3C4, asextloadi16, GRH32, 2>,
+ Requires<[FeatureHighWord]>;
+def LHRL : UnaryRILPC<"lhrl", 0xC45, aligned_asextloadi16, GR32>;
+
+// 64-bit extensions from memory.
+def LGB : UnaryRXY<"lgb", 0xE377, asextloadi8, GR64, 1>;
+def LGH : UnaryRXY<"lgh", 0xE315, asextloadi16, GR64, 2>;
+def LGF : UnaryRXY<"lgf", 0xE314, asextloadi32, GR64, 4>;
+def LGHRL : UnaryRILPC<"lghrl", 0xC44, aligned_asextloadi16, GR64>;
+def LGFRL : UnaryRILPC<"lgfrl", 0xC4C, aligned_asextloadi32, GR64>;
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LTGF : UnaryRXY<"ltgf", 0xE332, asextloadi32, GR64, 4>;
+
+//===----------------------------------------------------------------------===//
+// Zero extensions
+//===----------------------------------------------------------------------===//
+
+// 32-bit extensions from registers.
+let neverHasSideEffects = 1 in {
+ // Expands to LLCR or RISB[LH]G, depending on the choice of registers.
+ def LLCRMux : UnaryRRPseudo<"llc", zext8, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+ def LLCR : UnaryRRE<"llc", 0xB994, zext8, GR32, GR32>;
+ // Expands to LLHR or RISB[LH]G, depending on the choice of registers.
+ def LLHRMux : UnaryRRPseudo<"llh", zext16, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+ def LLHR : UnaryRRE<"llh", 0xB995, zext16, GR32, GR32>;
+}
+
+// 64-bit extensions from registers.
+let neverHasSideEffects = 1 in {
+ def LLGCR : UnaryRRE<"llgc", 0xB984, zext8, GR64, GR64>;
+ def LLGHR : UnaryRRE<"llgh", 0xB985, zext16, GR64, GR64>;
+ def LLGFR : UnaryRRE<"llgf", 0xB916, zext32, GR64, GR32>;
+}
+
+// Match 32-to-64-bit zero extensions in which the source is already
+// in a 64-bit register.
+def : Pat<(and GR64:$src, 0xffffffff),
+ (LLGFR (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
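+
+// In C terms the pattern above is "x & 0xffffffffULL", i.e.
+// "(uint64_t)(uint32_t)x"; LLGFR only reads the low word, so the AND
+// itself costs nothing extra.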
+
+// 32-bit extensions from 8-bit memory. LLCMux expands to LLC or LLCH,
+// depending on the choice of register.
+def LLCMux : UnaryRXYPseudo<"llc", azextloadi8, GRX32, 1>,
+ Requires<[FeatureHighWord]>;
+def LLC : UnaryRXY<"llc", 0xE394, azextloadi8, GR32, 1>;
+def LLCH : UnaryRXY<"llch", 0xE3C2, azextloadi8, GRH32, 1>,
+ Requires<[FeatureHighWord]>;
+
+// 32-bit extensions from 16-bit memory. LLHMux expands to LLH or LLHH,
+// depending on the choice of register.
+def LLHMux : UnaryRXYPseudo<"llh", azextloadi16, GRX32, 2>,
+ Requires<[FeatureHighWord]>;
+def LLH : UnaryRXY<"llh", 0xE395, azextloadi16, GR32, 2>;
+def LLHH : UnaryRXY<"llhh", 0xE3C6, azextloadi16, GRH32, 2>,
+ Requires<[FeatureHighWord]>;
+def LLHRL : UnaryRILPC<"llhrl", 0xC42, aligned_azextloadi16, GR32>;
+
+// 64-bit extensions from memory.
+def LLGC : UnaryRXY<"llgc", 0xE390, azextloadi8, GR64, 1>;
+def LLGH : UnaryRXY<"llgh", 0xE391, azextloadi16, GR64, 2>;
+def LLGF : UnaryRXY<"llgf", 0xE316, azextloadi32, GR64, 4>;
+def LLGHRL : UnaryRILPC<"llghrl", 0xC46, aligned_azextloadi16, GR64>;
+def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_azextloadi32, GR64>;
+
+//===----------------------------------------------------------------------===//
+// Truncations
+//===----------------------------------------------------------------------===//
+
+// Truncations of 64-bit registers to 32-bit registers.
+def : Pat<(i32 (trunc GR64:$src)),
+ (EXTRACT_SUBREG GR64:$src, subreg_l32)>;
+
+// Truncations of 32-bit registers to 8-bit memory. STCMux expands to
+// STC, STCY or STCH, depending on the choice of register.
+def STCMux : StoreRXYPseudo<truncstorei8, GRX32, 1>,
+ Requires<[FeatureHighWord]>;
+defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32, 1>;
+def STCH : StoreRXY<"stch", 0xE3C3, truncstorei8, GRH32, 1>,
+ Requires<[FeatureHighWord]>;
+
+// Truncations of 32-bit registers to 16-bit memory. STHMux expands to
+// STH, STHY or STHH, depending on the choice of register.
+def STHMux : StoreRXYPseudo<truncstorei16, GRX32, 2>,
+ Requires<[FeatureHighWord]>;
+defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32, 2>;
+def STHH : StoreRXY<"sthh", 0xE3C7, truncstorei16, GRH32, 2>,
+ Requires<[FeatureHighWord]>;
+def STHRL : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR32>;
+
+// Truncations of 64-bit registers to memory.
+defm : StoreGR64Pair<STC, STCY, truncstorei8>;
+defm : StoreGR64Pair<STH, STHY, truncstorei16>;
+def : StoreGR64PC<STHRL, aligned_truncstorei16>;
+defm : StoreGR64Pair<ST, STY, truncstorei32>;
+def : StoreGR64PC<STRL, aligned_truncstorei32>;
+
+//===----------------------------------------------------------------------===//
+// Multi-register moves
+//===----------------------------------------------------------------------===//
+
+// Multi-register loads.
+def LMG : LoadMultipleRSY<"lmg", 0xEB04, GR64>;
+
+// Multi-register stores.
+def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>;
+
+//===----------------------------------------------------------------------===//
+// Byte swaps
+//===----------------------------------------------------------------------===//
+
+// Byte-swapping register moves.
+let neverHasSideEffects = 1 in {
+ def LRVR : UnaryRRE<"lrv", 0xB91F, bswap, GR32, GR32>;
+ def LRVGR : UnaryRRE<"lrvg", 0xB90F, bswap, GR64, GR64>;
+}
+
+// Byte-swapping loads. Unlike normal loads, these instructions are
+// allowed to access storage more than once.
+def LRV : UnaryRXY<"lrv", 0xE31E, loadu<bswap, nonvolatile_load>, GR32, 4>;
+def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu<bswap, nonvolatile_load>, GR64, 8>;
+
+// Likewise byte-swapping stores.
+def STRV : StoreRXY<"strv", 0xE33E, storeu<bswap, nonvolatile_store>, GR32, 4>;
+def STRVG : StoreRXY<"strvg", 0xE32F, storeu<bswap, nonvolatile_store>,
+ GR64, 8>;
+
+//===----------------------------------------------------------------------===//
+// Load address instructions
+//===----------------------------------------------------------------------===//
+
+// Load BDX-style addresses.
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isReMaterializable = 1,
+ DispKey = "la" in {
+ let DispSize = "12" in
+ def LA : InstRX<0x41, (outs GR64:$R1), (ins laaddr12pair:$XBD2),
+ "la\t$R1, $XBD2",
+ [(set GR64:$R1, laaddr12pair:$XBD2)]>;
+ let DispSize = "20" in
+ def LAY : InstRXY<0xE371, (outs GR64:$R1), (ins laaddr20pair:$XBD2),
+ "lay\t$R1, $XBD2",
+ [(set GR64:$R1, laaddr20pair:$XBD2)]>;
+}
+
+// Load a PC-relative address. There's no version of this instruction
+// with a 16-bit offset, so there's no relaxation.
+let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
+ isReMaterializable = 1 in {
+ def LARL : InstRIL<0xC00, (outs GR64:$R1), (ins pcrel32:$I2),
+ "larl\t$R1, $I2",
+ [(set GR64:$R1, pcrel32:$I2)]>;
+}
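+
+// For example, "larl %r1, sym" sets %r1 to the address of sym, which must
+// lie within +/-4GB of the instruction; the encoded immediate is a signed
+// 32-bit count of halfwords.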
+
+//===----------------------------------------------------------------------===//
+// Absolute and Negation
+//===----------------------------------------------------------------------===//
+
+let Defs = [CC] in {
+ let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ def LPR : UnaryRR <"lp", 0x10, z_iabs32, GR32, GR32>;
+ def LPGR : UnaryRRE<"lpg", 0xB900, z_iabs64, GR64, GR64>;
+ }
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LPGFR : UnaryRRE<"lpgf", 0xB910, null_frag, GR64, GR32>;
+}
+defm : SXU<z_iabs64, LPGFR>;
+
+let Defs = [CC] in {
+ let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ def LNR : UnaryRR <"ln", 0x11, z_inegabs32, GR32, GR32>;
+ def LNGR : UnaryRRE<"lng", 0xB901, z_inegabs64, GR64, GR64>;
+ }
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LNGFR : UnaryRRE<"lngf", 0xB911, null_frag, GR64, GR32>;
+}
+defm : SXU<z_inegabs64, LNGFR>;
+
+let Defs = [CC] in {
+ let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ def LCR : UnaryRR <"lc", 0x13, ineg, GR32, GR32>;
+ def LCGR : UnaryRRE<"lcg", 0xB903, ineg, GR64, GR64>;
+ }
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def LCGFR : UnaryRRE<"lcgf", 0xB913, null_frag, GR64, GR32>;
+}
+defm : SXU<ineg, LCGFR>;
+
+//===----------------------------------------------------------------------===//
+// Insertion
+//===----------------------------------------------------------------------===//
+
+let isCodeGenOnly = 1 in
+ defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, azextloadi8, 1>;
+defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, azextloadi8, 1>;
+
+defm : InsertMem<"inserti8", IC32, GR32, azextloadi8, bdxaddr12pair>;
+defm : InsertMem<"inserti8", IC32Y, GR32, azextloadi8, bdxaddr20pair>;
+
+defm : InsertMem<"inserti8", IC, GR64, azextloadi8, bdxaddr12pair>;
+defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>;
+
+// Insertions of a 16-bit immediate, leaving other bits unaffected.
+// We don't have or_as_insert equivalents of these operations because
+// OI is available instead.
+//
+// IIxMux expands to II[LH]x, depending on the choice of register.
+def IILMux : BinaryRIPseudo<insertll, GRX32, imm32ll16>,
+ Requires<[FeatureHighWord]>;
+def IIHMux : BinaryRIPseudo<insertlh, GRX32, imm32lh16>,
+ Requires<[FeatureHighWord]>;
+def IILL : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
+def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
+def IIHL : BinaryRI<"iihl", 0xA51, insertll, GRH32, imm32ll16>;
+def IIHH : BinaryRI<"iihh", 0xA50, insertlh, GRH32, imm32lh16>;
+def IILL64 : BinaryAliasRI<insertll, GR64, imm64ll16>;
+def IILH64 : BinaryAliasRI<insertlh, GR64, imm64lh16>;
+def IIHL64 : BinaryAliasRI<inserthl, GR64, imm64hl16>;
+def IIHH64 : BinaryAliasRI<inserthh, GR64, imm64hh16>;
+
+// ...likewise for 32-bit immediates. For GR32s this is a general
+// full-width move. (We use IILF rather than something like LLILF
+// for 32-bit moves because IILF leaves the upper 32 bits of the
+// GR64 unchanged.)
+let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in {
+ def IIFMux : UnaryRIPseudo<bitconvert, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def IILF : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
+ def IIHF : UnaryRIL<"iihf", 0xC08, bitconvert, GRH32, uimm32>;
+}
+def IILF64 : BinaryAliasRIL<insertlf, GR64, imm64lf32>;
+def IIHF64 : BinaryAliasRIL<inserthf, GR64, imm64hf32>;
+
+// An alternative model of inserthf, with the first operand being
+// a zero-extended value.
+def : Pat<(or (zext32 GR32:$src), imm64hf32:$imm),
+ (IIHF64 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32),
+ imm64hf32:$imm)>;
+
+//===----------------------------------------------------------------------===//
+// Addition
+//===----------------------------------------------------------------------===//
+
+// Plain addition.
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ // Addition of a register.
+ let isCommutable = 1 in {
+ defm AR : BinaryRRAndK<"a", 0x1A, 0xB9F8, add, GR32, GR32>;
+ defm AGR : BinaryRREAndK<"ag", 0xB908, 0xB9E8, add, GR64, GR64>;
+ }
+ def AGFR : BinaryRRE<"agf", 0xB918, null_frag, GR64, GR32>;
+
+ // Addition of signed 16-bit immediates.
+ defm AHIMux : BinaryRIAndKPseudo<"ahimux", add, GRX32, imm32sx16>;
+ defm AHI : BinaryRIAndK<"ahi", 0xA7A, 0xECD8, add, GR32, imm32sx16>;
+ defm AGHI : BinaryRIAndK<"aghi", 0xA7B, 0xECD9, add, GR64, imm64sx16>;
+
+ // Addition of signed 32-bit immediates.
+ def AFIMux : BinaryRIPseudo<add, GRX32, simm32>,
+ Requires<[FeatureHighWord]>;
+ def AFI : BinaryRIL<"afi", 0xC29, add, GR32, simm32>;
+ def AIH : BinaryRIL<"aih", 0xCC8, add, GRH32, simm32>,
+ Requires<[FeatureHighWord]>;
+ def AGFI : BinaryRIL<"agfi", 0xC28, add, GR64, imm64sx32>;
+
+ // Addition of memory.
+ defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, asextloadi16, 2>;
+ defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load, 4>;
+ def AGF : BinaryRXY<"agf", 0xE318, add, GR64, asextloadi32, 4>;
+ def AG : BinaryRXY<"ag", 0xE308, add, GR64, load, 8>;
+
+ // Addition to memory.
+ def ASI : BinarySIY<"asi", 0xEB6A, add, imm32sx8>;
+ def AGSI : BinarySIY<"agsi", 0xEB7A, add, imm64sx8>;
+}
+defm : SXB<add, GR64, AGFR>;
+
+// Addition producing a carry.
+let Defs = [CC] in {
+ // Addition of a register.
+ let isCommutable = 1 in {
+ defm ALR : BinaryRRAndK<"al", 0x1E, 0xB9FA, addc, GR32, GR32>;
+ defm ALGR : BinaryRREAndK<"alg", 0xB90A, 0xB9EA, addc, GR64, GR64>;
+ }
+ def ALGFR : BinaryRRE<"algf", 0xB91A, null_frag, GR64, GR32>;
+
+ // Addition of signed 16-bit immediates.
+ def ALHSIK : BinaryRIE<"alhsik", 0xECDA, addc, GR32, imm32sx16>,
+ Requires<[FeatureDistinctOps]>;
+ def ALGHSIK : BinaryRIE<"alghsik", 0xECDB, addc, GR64, imm64sx16>,
+ Requires<[FeatureDistinctOps]>;
+
+ // Addition of unsigned 32-bit immediates.
+ def ALFI : BinaryRIL<"alfi", 0xC2B, addc, GR32, uimm32>;
+ def ALGFI : BinaryRIL<"algfi", 0xC2A, addc, GR64, imm64zx32>;
+
+ // Addition of memory.
+ defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load, 4>;
+ def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, azextloadi32, 4>;
+ def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load, 8>;
+}
+defm : ZXB<addc, GR64, ALGFR>;
+
+// Addition producing and using a carry.
+let Defs = [CC], Uses = [CC] in {
+ // Addition of a register.
+ def ALCR : BinaryRRE<"alc", 0xB998, adde, GR32, GR32>;
+ def ALCGR : BinaryRRE<"alcg", 0xB988, adde, GR64, GR64>;
+
+ // Addition of memory.
+ def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load, 4>;
+ def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load, 8>;
+}
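+
+// Together, the two groups above give the usual 128-bit addition sequence,
+// e.g. with the low halves in %r1/%r3 and the high halves in %r0/%r2
+// (registers illustrative):
+//
+//   algr  %r1, %r3        # add low halves, recording the carry in CC
+//   alcgr %r0, %r2        # add high halves plus the carry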
+
+//===----------------------------------------------------------------------===//
+// Subtraction
+//===----------------------------------------------------------------------===//
+
+// Plain subtraction. Although immediate forms exist, we use the
+// add-immediate instruction instead.
+let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
+ // Subtraction of a register.
+ defm SR : BinaryRRAndK<"s", 0x1B, 0xB9F9, sub, GR32, GR32>;
+ def SGFR : BinaryRRE<"sgf", 0xB919, null_frag, GR64, GR32>;
+ defm SGR : BinaryRREAndK<"sg", 0xB909, 0xB9E9, sub, GR64, GR64>;
+
+ // Subtraction of memory.
+ defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, asextloadi16, 2>;
+ defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load, 4>;
+ def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, asextloadi32, 4>;
+ def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load, 8>;
+}
+defm : SXB<sub, GR64, SGFR>;
+
+// Subtraction producing a carry.
+let Defs = [CC] in {
+ // Subtraction of a register.
+ defm SLR : BinaryRRAndK<"sl", 0x1F, 0xB9FB, subc, GR32, GR32>;
+ def SLGFR : BinaryRRE<"slgf", 0xB91B, null_frag, GR64, GR32>;
+ defm SLGR : BinaryRREAndK<"slg", 0xB90B, 0xB9EB, subc, GR64, GR64>;
+
+ // Subtraction of unsigned 32-bit immediates. These don't match
+ // subc because we prefer addc for constants.
+ def SLFI : BinaryRIL<"slfi", 0xC25, null_frag, GR32, uimm32>;
+ def SLGFI : BinaryRIL<"slgfi", 0xC24, null_frag, GR64, imm64zx32>;
+
+ // Subtraction of memory.
+ defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load, 4>;
+ def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, azextloadi32, 4>;
+ def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load, 8>;
+}
+defm : ZXB<subc, GR64, SLGFR>;
+
+// Subtraction producing and using a carry.
+let Defs = [CC], Uses = [CC] in {
+ // Subtraction of a register.
+ def SLBR : BinaryRRE<"slb", 0xB999, sube, GR32, GR32>;
+ def SLGBR : BinaryRRE<"slbg", 0xB989, sube, GR64, GR64>;
+
+ // Subtraction of memory.
+ def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load, 4>;
+ def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load, 8>;
+}
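+
+// The subtraction analogue of the sequence shown for addition, again with
+// illustrative registers:
+//
+//   slgr  %r1, %r3        # subtract low halves, recording the borrow in CC
+//   slbgr %r0, %r2        # subtract high halves and the borrow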
+
+//===----------------------------------------------------------------------===//
+// AND
+//===----------------------------------------------------------------------===//
+
+let Defs = [CC] in {
+ // ANDs of a register.
+ let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm NR : BinaryRRAndK<"n", 0x14, 0xB9F4, and, GR32, GR32>;
+ defm NGR : BinaryRREAndK<"ng", 0xB980, 0xB9E4, and, GR64, GR64>;
+ }
+
+ let isConvertibleToThreeAddress = 1 in {
+ // ANDs of a 16-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 16-bit field, not the full register.
+ //
+ // NIxMux expands to NI[LH]x, depending on the choice of register.
+ def NILMux : BinaryRIPseudo<and, GRX32, imm32ll16c>,
+ Requires<[FeatureHighWord]>;
+ def NIHMux : BinaryRIPseudo<and, GRX32, imm32lh16c>,
+ Requires<[FeatureHighWord]>;
+ def NILL : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
+ def NILH : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
+ def NIHL : BinaryRI<"nihl", 0xA55, and, GRH32, imm32ll16c>;
+ def NIHH : BinaryRI<"nihh", 0xA54, and, GRH32, imm32lh16c>;
+ def NILL64 : BinaryAliasRI<and, GR64, imm64ll16c>;
+ def NILH64 : BinaryAliasRI<and, GR64, imm64lh16c>;
+ def NIHL64 : BinaryAliasRI<and, GR64, imm64hl16c>;
+ def NIHH64 : BinaryAliasRI<and, GR64, imm64hh16c>;
+
+ // ANDs of a 32-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 32-bit field, which means we can
+ // use it as a zero indicator for i32 operations but not otherwise.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ // Expands to NILF or NIHF, depending on the choice of register.
+ def NIFMux : BinaryRIPseudo<and, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def NILF : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
+ def NIHF : BinaryRIL<"nihf", 0xC0A, and, GRH32, uimm32>;
+ }
+ def NILF64 : BinaryAliasRIL<and, GR64, imm64lf32c>;
+ def NIHF64 : BinaryAliasRIL<and, GR64, imm64hf32c>;
+ }
+
+ // ANDs of memory.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load, 4>;
+ def NG : BinaryRXY<"ng", 0xE380, and, GR64, load, 8>;
+ }
+
+ // AND to memory
+ defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, uimm8>;
+
+ // Block AND.
+ let mayLoad = 1, mayStore = 1 in
+ defm NC : MemorySS<"nc", 0xD4, z_nc, z_nc_loop>;
+}
+defm : RMWIByte<and, bdaddr12pair, NI>;
+defm : RMWIByte<and, bdaddr20pair, NIY>;
+
+//===----------------------------------------------------------------------===//
+// OR
+//===----------------------------------------------------------------------===//
+
+let Defs = [CC] in {
+ // ORs of a register.
+ let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm OR : BinaryRRAndK<"o", 0x16, 0xB9F6, or, GR32, GR32>;
+ defm OGR : BinaryRREAndK<"og", 0xB981, 0xB9E6, or, GR64, GR64>;
+ }
+
+ // ORs of a 16-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 16-bit field, not the full register.
+ //
+ // OIxMux expands to OI[LH]x, depending on the choice of register.
+ def OILMux : BinaryRIPseudo<or, GRX32, imm32ll16>,
+ Requires<[FeatureHighWord]>;
+ def OIHMux : BinaryRIPseudo<or, GRX32, imm32lh16>,
+ Requires<[FeatureHighWord]>;
+ def OILL : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
+ def OILH : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
+ def OIHL : BinaryRI<"oihl", 0xA59, or, GRH32, imm32ll16>;
+ def OIHH : BinaryRI<"oihh", 0xA58, or, GRH32, imm32lh16>;
+ def OILL64 : BinaryAliasRI<or, GR64, imm64ll16>;
+ def OILH64 : BinaryAliasRI<or, GR64, imm64lh16>;
+ def OIHL64 : BinaryAliasRI<or, GR64, imm64hl16>;
+ def OIHH64 : BinaryAliasRI<or, GR64, imm64hh16>;
+
+ // ORs of a 32-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 32-bit field, which means we can
+ // use it as a zero indicator for i32 operations but not otherwise.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ // Expands to OILF or OIHF, depending on the choice of register.
+ def OIFMux : BinaryRIPseudo<or, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def OILF : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
+ def OIHF : BinaryRIL<"oihf", 0xC0C, or, GRH32, uimm32>;
+ }
+ def OILF64 : BinaryAliasRIL<or, GR64, imm64lf32>;
+ def OIHF64 : BinaryAliasRIL<or, GR64, imm64hf32>;
+
+ // ORs of memory.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load, 4>;
+ def OG : BinaryRXY<"og", 0xE381, or, GR64, load, 8>;
+ }
+
+ // OR to memory
+ defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, uimm8>;
+
+ // Block OR.
+ let mayLoad = 1, mayStore = 1 in
+ defm OC : MemorySS<"oc", 0xD6, z_oc, z_oc_loop>;
+}
+defm : RMWIByte<or, bdaddr12pair, OI>;
+defm : RMWIByte<or, bdaddr20pair, OIY>;
+
+//===----------------------------------------------------------------------===//
+// XOR
+//===----------------------------------------------------------------------===//
+
+let Defs = [CC] in {
+ // XORs of a register.
+ let isCommutable = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ defm XR : BinaryRRAndK<"x", 0x17, 0xB9F7, xor, GR32, GR32>;
+ defm XGR : BinaryRREAndK<"xg", 0xB982, 0xB9E7, xor, GR64, GR64>;
+ }
+
+ // XORs of a 32-bit immediate, leaving other bits unaffected.
+ // The CC result only reflects the 32-bit field, which means we can
+ // use it as a zero indicator for i32 operations but not otherwise.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+ // Expands to XILF or XIHF, depending on the choice of register.
+ def XIFMux : BinaryRIPseudo<xor, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def XILF : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
+ def XIHF : BinaryRIL<"xihf", 0xC06, xor, GRH32, uimm32>;
+ }
+ def XILF64 : BinaryAliasRIL<xor, GR64, imm64lf32>;
+ def XIHF64 : BinaryAliasRIL<xor, GR64, imm64hf32>;
+
+ // XORs of memory.
+ let CCValues = 0xC, CompareZeroCCMask = 0x8 in {
+    defm X : BinaryRXPair<"x", 0x57, 0xE357, xor, GR32, load, 4>;
+ def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load, 8>;
+ }
+
+ // XOR to memory
+ defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, uimm8>;
+
+ // Block XOR.
+ let mayLoad = 1, mayStore = 1 in
+ defm XC : MemorySS<"xc", 0xD7, z_xc, z_xc_loop>;
+}
+defm : RMWIByte<xor, bdaddr12pair, XI>;
+defm : RMWIByte<xor, bdaddr20pair, XIY>;
+
+//===----------------------------------------------------------------------===//
+// Multiplication
+//===----------------------------------------------------------------------===//
+
+// Multiplication of a register.
+let isCommutable = 1 in {
+ def MSR : BinaryRRE<"ms", 0xB252, mul, GR32, GR32>;
+ def MSGR : BinaryRRE<"msg", 0xB90C, mul, GR64, GR64>;
+}
+def MSGFR : BinaryRRE<"msgf", 0xB91C, null_frag, GR64, GR32>;
+defm : SXB<mul, GR64, MSGFR>;
+
+// Multiplication of a signed 16-bit immediate.
+def MHI : BinaryRI<"mhi", 0xA7C, mul, GR32, imm32sx16>;
+def MGHI : BinaryRI<"mghi", 0xA7D, mul, GR64, imm64sx16>;
+
+// Multiplication of a signed 32-bit immediate.
+def MSFI : BinaryRIL<"msfi", 0xC21, mul, GR32, simm32>;
+def MSGFI : BinaryRIL<"msgfi", 0xC20, mul, GR64, imm64sx32>;
+
+// Multiplication of memory.
+defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, asextloadi16, 2>;
+defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load, 4>;
+def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, asextloadi32, 4>;
+def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>;
+
+// Multiplication of a register, producing two results.
+def MLGR : BinaryRRE<"mlg", 0xB986, z_umul_lohi64, GR128, GR64>;
+
+// Multiplication of memory, producing two results.
+def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>;
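+
+// The GR128 operand is an even/odd register pair: for example,
+// "mlgr %r0, %r5" multiplies %r1 by %r5, leaving the high 64 bits of the
+// product in %r0 and the low 64 bits in %r1 (registers illustrative).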
+
+//===----------------------------------------------------------------------===//
+// Division and remainder
+//===----------------------------------------------------------------------===//
+
+// Division and remainder, from registers.
+def DSGFR : BinaryRRE<"dsgf", 0xB91D, z_sdivrem32, GR128, GR32>;
+def DSGR : BinaryRRE<"dsg", 0xB90D, z_sdivrem64, GR128, GR64>;
+def DLR : BinaryRRE<"dl", 0xB997, z_udivrem32, GR128, GR32>;
+def DLGR : BinaryRRE<"dlg", 0xB987, z_udivrem64, GR128, GR64>;
+
+// Division and remainder, from memory.
+def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load, 4>;
+def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load, 8>;
+def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load, 4>;
+def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
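+
+// These also use even/odd pairs: for example, "dsgr %r0, %r4" divides the
+// 64-bit dividend in %r1 by %r4, leaving the remainder in %r0 and the
+// quotient in %r1 (registers illustrative).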
+
+//===----------------------------------------------------------------------===//
+// Shifts
+//===----------------------------------------------------------------------===//
+
+// Shift left.
+let neverHasSideEffects = 1 in {
+ defm SLL : ShiftRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+ def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64>;
+}
+
+// Logical shift right.
+let neverHasSideEffects = 1 in {
+ defm SRL : ShiftRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
+ def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64>;
+}
+
+// Arithmetic shift right.
+let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
+ defm SRA : ShiftRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
+ def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64>;
+}
+
+// Rotate left.
+let neverHasSideEffects = 1 in {
+ def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32>;
+ def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64>;
+}
+
+// Rotate second operand left and insert selected bits into first operand.
+// These can act like 32-bit operands provided that the constant start and
+// end bits (operands 2 and 3) are in the range [32, 64).
+let Defs = [CC] in {
+ let isCodeGenOnly = 1 in
+ def RISBG32 : RotateSelectRIEf<"risbg", 0xEC55, GR32, GR32>;
+ let CCValues = 0xE, CompareZeroCCMask = 0xE in
+ def RISBG : RotateSelectRIEf<"risbg", 0xEC55, GR64, GR64>;
+}
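+
+// For example, "risbg %r1, %r2, 60, 191, 0" implements %r1 = %r2 & 15:
+// it selects bits 60-63 (the low four), and the extra 128 in I4
+// (191 = 128 + 63) requests that all unselected bits be zeroed.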
+
+// Forms of RISBG that only affect one word of the destination register.
+// They do not set CC.
+def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>, Requires<[FeatureHighWord]>;
+def RISBLL : RotateSelectAliasRIEf<GR32, GR32>, Requires<[FeatureHighWord]>;
+def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>, Requires<[FeatureHighWord]>;
+def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>, Requires<[FeatureHighWord]>;
+def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>, Requires<[FeatureHighWord]>;
+def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>,
+ Requires<[FeatureHighWord]>;
+def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>,
+ Requires<[FeatureHighWord]>;
+
+// Rotate second operand left and perform a logical operation with selected
+// bits of the first operand. The CC result only describes the selected bits,
+// so isn't useful for a full comparison against zero.
+let Defs = [CC] in {
+ def RNSBG : RotateSelectRIEf<"rnsbg", 0xEC54, GR64, GR64>;
+ def ROSBG : RotateSelectRIEf<"rosbg", 0xEC56, GR64, GR64>;
+ def RXSBG : RotateSelectRIEf<"rxsbg", 0xEC57, GR64, GR64>;
+}
+
+//===----------------------------------------------------------------------===//
+// Comparison
+//===----------------------------------------------------------------------===//
+
+// Signed comparisons. We put these before the unsigned comparisons because
+// some of the signed forms have COMPARE AND BRANCH equivalents whereas none
+// of the unsigned forms do.
+let Defs = [CC], CCValues = 0xE in {
+ // Comparison with a register.
+ def CR : CompareRR <"c", 0x19, z_scmp, GR32, GR32>;
+ def CGFR : CompareRRE<"cgf", 0xB930, null_frag, GR64, GR32>;
+ def CGR : CompareRRE<"cg", 0xB920, z_scmp, GR64, GR64>;
+
+ // Comparison with a signed 16-bit immediate.
+ def CHI : CompareRI<"chi", 0xA7E, z_scmp, GR32, imm32sx16>;
+ def CGHI : CompareRI<"cghi", 0xA7F, z_scmp, GR64, imm64sx16>;
+
+ // Comparison with a signed 32-bit immediate. CFIMux expands to CFI or CIH,
+ // depending on the choice of register.
+ def CFIMux : CompareRIPseudo<z_scmp, GRX32, simm32>,
+ Requires<[FeatureHighWord]>;
+ def CFI : CompareRIL<"cfi", 0xC2D, z_scmp, GR32, simm32>;
+ def CIH : CompareRIL<"cih", 0xCCD, z_scmp, GRH32, simm32>,
+ Requires<[FeatureHighWord]>;
+ def CGFI : CompareRIL<"cgfi", 0xC2C, z_scmp, GR64, imm64sx32>;
+
+ // Comparison with memory.
+ defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_scmp, GR32, asextloadi16, 2>;
+ def CMux : CompareRXYPseudo<z_scmp, GRX32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ defm C : CompareRXPair<"c", 0x59, 0xE359, z_scmp, GR32, load, 4>;
+ def CHF : CompareRXY<"chf", 0xE3CD, z_scmp, GRH32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ def CGH : CompareRXY<"cgh", 0xE334, z_scmp, GR64, asextloadi16, 2>;
+ def CGF : CompareRXY<"cgf", 0xE330, z_scmp, GR64, asextloadi32, 4>;
+ def CG : CompareRXY<"cg", 0xE320, z_scmp, GR64, load, 8>;
+ def CHRL : CompareRILPC<"chrl", 0xC65, z_scmp, GR32, aligned_asextloadi16>;
+ def CRL : CompareRILPC<"crl", 0xC6D, z_scmp, GR32, aligned_load>;
+ def CGHRL : CompareRILPC<"cghrl", 0xC64, z_scmp, GR64, aligned_asextloadi16>;
+ def CGFRL : CompareRILPC<"cgfrl", 0xC6C, z_scmp, GR64, aligned_asextloadi32>;
+ def CGRL : CompareRILPC<"cgrl", 0xC68, z_scmp, GR64, aligned_load>;
+
+ // Comparison between memory and a signed 16-bit immediate.
+ def CHHSI : CompareSIL<"chhsi", 0xE554, z_scmp, asextloadi16, imm32sx16>;
+ def CHSI : CompareSIL<"chsi", 0xE55C, z_scmp, load, imm32sx16>;
+ def CGHSI : CompareSIL<"cghsi", 0xE558, z_scmp, load, imm64sx16>;
+}
+defm : SXB<z_scmp, GR64, CGFR>;
+
+// Unsigned comparisons.
+let Defs = [CC], CCValues = 0xE, IsLogical = 1 in {
+ // Comparison with a register.
+ def CLR : CompareRR <"cl", 0x15, z_ucmp, GR32, GR32>;
+ def CLGFR : CompareRRE<"clgf", 0xB931, null_frag, GR64, GR32>;
+ def CLGR : CompareRRE<"clg", 0xB921, z_ucmp, GR64, GR64>;
+
+ // Comparison with an unsigned 32-bit immediate. CLFIMux expands to CLFI
+ // or CLIH, depending on the choice of register.
+ def CLFIMux : CompareRIPseudo<z_ucmp, GRX32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def CLFI : CompareRIL<"clfi", 0xC2F, z_ucmp, GR32, uimm32>;
+  def CLIH : CompareRIL<"clih", 0xCCF, z_ucmp, GRH32, uimm32>,
+ Requires<[FeatureHighWord]>;
+ def CLGFI : CompareRIL<"clgfi", 0xC2E, z_ucmp, GR64, imm64zx32>;
+
+ // Comparison with memory.
+ def CLMux : CompareRXYPseudo<z_ucmp, GRX32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load, 4>;
+ def CLHF : CompareRXY<"clhf", 0xE3CF, z_ucmp, GRH32, load, 4>,
+ Requires<[FeatureHighWord]>;
+ def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, azextloadi32, 4>;
+ def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load, 8>;
+ def CLHRL : CompareRILPC<"clhrl", 0xC67, z_ucmp, GR32,
+ aligned_azextloadi16>;
+ def CLRL : CompareRILPC<"clrl", 0xC6F, z_ucmp, GR32,
+ aligned_load>;
+ def CLGHRL : CompareRILPC<"clghrl", 0xC66, z_ucmp, GR64,
+ aligned_azextloadi16>;
+ def CLGFRL : CompareRILPC<"clgfrl", 0xC6E, z_ucmp, GR64,
+ aligned_azextloadi32>;
+ def CLGRL : CompareRILPC<"clgrl", 0xC6A, z_ucmp, GR64,
+ aligned_load>;
+
+ // Comparison between memory and an unsigned 8-bit immediate.
+ defm CLI : CompareSIPair<"cli", 0x95, 0xEB55, z_ucmp, azextloadi8, imm32zx8>;
+
+ // Comparison between memory and an unsigned 16-bit immediate.
+ def CLHHSI : CompareSIL<"clhhsi", 0xE555, z_ucmp, azextloadi16, imm32zx16>;
+ def CLFHSI : CompareSIL<"clfhsi", 0xE55D, z_ucmp, load, imm32zx16>;
+ def CLGHSI : CompareSIL<"clghsi", 0xE559, z_ucmp, load, imm64zx16>;
+}
+defm : ZXB<z_ucmp, GR64, CLGFR>;
+
+// Memory-to-memory comparison.
+let mayLoad = 1, Defs = [CC] in
+ defm CLC : MemorySS<"clc", 0xD5, z_clc, z_clc_loop>;
+
+// String comparison.
+let mayLoad = 1, Defs = [CC], Uses = [R0L] in
+ defm CLST : StringRRE<"clst", 0xB25D, z_strcmp>;
+
+// Test under mask.
+let Defs = [CC] in {
+ // TMxMux expands to TM[LH]x, depending on the choice of register.
+ def TMLMux : CompareRIPseudo<z_tm_reg, GRX32, imm32ll16>,
+ Requires<[FeatureHighWord]>;
+ def TMHMux : CompareRIPseudo<z_tm_reg, GRX32, imm32lh16>,
+ Requires<[FeatureHighWord]>;
+ def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
+ def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
+ def TMHL : CompareRI<"tmhl", 0xA73, z_tm_reg, GRH32, imm32ll16>;
+ def TMHH : CompareRI<"tmhh", 0xA72, z_tm_reg, GRH32, imm32lh16>;
+
+ def TMLL64 : CompareAliasRI<z_tm_reg, GR64, imm64ll16>;
+ def TMLH64 : CompareAliasRI<z_tm_reg, GR64, imm64lh16>;
+ def TMHL64 : CompareAliasRI<z_tm_reg, GR64, imm64hl16>;
+ def TMHH64 : CompareAliasRI<z_tm_reg, GR64, imm64hh16>;
+
+ defm TM : CompareSIPair<"tm", 0x91, 0xEB51, z_tm_mem, anyextloadi8, imm32zx8>;
+}
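+
+// For example, "tmll %r1, 1" tests the low bit of %r1; with a single-bit
+// mask, CC is 0 if the bit is clear and 3 if it is set, so the result can
+// be branched on directly without a separate compare.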
+
+//===----------------------------------------------------------------------===//
+// Prefetch
+//===----------------------------------------------------------------------===//
+
+def PFD : PrefetchRXY<"pfd", 0xE336, z_prefetch>;
+def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
+
+//===----------------------------------------------------------------------===//
+// Atomic operations
+//===----------------------------------------------------------------------===//
+
+def ATOMIC_SWAPW : AtomicLoadWBinaryReg<z_atomic_swapw>;
+def ATOMIC_SWAP_32 : AtomicLoadBinaryReg32<atomic_swap_32>;
+def ATOMIC_SWAP_64 : AtomicLoadBinaryReg64<atomic_swap_64>;
+
+def ATOMIC_LOADW_AR : AtomicLoadWBinaryReg<z_atomic_loadw_add>;
+def ATOMIC_LOADW_AFI : AtomicLoadWBinaryImm<z_atomic_loadw_add, simm32>;
+def ATOMIC_LOAD_AR : AtomicLoadBinaryReg32<atomic_load_add_32>;
+def ATOMIC_LOAD_AHI : AtomicLoadBinaryImm32<atomic_load_add_32, imm32sx16>;
+def ATOMIC_LOAD_AFI : AtomicLoadBinaryImm32<atomic_load_add_32, simm32>;
+def ATOMIC_LOAD_AGR : AtomicLoadBinaryReg64<atomic_load_add_64>;
+def ATOMIC_LOAD_AGHI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx16>;
+def ATOMIC_LOAD_AGFI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx32>;
+
+def ATOMIC_LOADW_SR : AtomicLoadWBinaryReg<z_atomic_loadw_sub>;
+def ATOMIC_LOAD_SR : AtomicLoadBinaryReg32<atomic_load_sub_32>;
+def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
+
+def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
+def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
+def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
+def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
+def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
+def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
+def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
+def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
+def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
+def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
+def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
+def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
+def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
+
+def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
+def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
+def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
+def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
+def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
+def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
+def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
+def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
+def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
+def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
+def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
+def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
+def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
+
+def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
+def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
+def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
+def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
+def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
+def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
+def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
+
+def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
+def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand,
+ imm32lh16c>;
+def ATOMIC_LOAD_NRi : AtomicLoadBinaryReg32<atomic_load_nand_32>;
+def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm32<atomic_load_nand_32,
+ imm32ll16c>;
+def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm32<atomic_load_nand_32,
+ imm32lh16c>;
+def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
+def ATOMIC_LOAD_NGRi : AtomicLoadBinaryReg64<atomic_load_nand_64>;
+def ATOMIC_LOAD_NILL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
+ imm64ll16c>;
+def ATOMIC_LOAD_NILH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
+ imm64lh16c>;
+def ATOMIC_LOAD_NIHL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
+ imm64hl16c>;
+def ATOMIC_LOAD_NIHH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
+ imm64hh16c>;
+def ATOMIC_LOAD_NILF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
+ imm64lf32c>;
+def ATOMIC_LOAD_NIHF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
+ imm64hf32c>;
+
+def ATOMIC_LOADW_MIN : AtomicLoadWBinaryReg<z_atomic_loadw_min>;
+def ATOMIC_LOAD_MIN_32 : AtomicLoadBinaryReg32<atomic_load_min_32>;
+def ATOMIC_LOAD_MIN_64 : AtomicLoadBinaryReg64<atomic_load_min_64>;
+
+def ATOMIC_LOADW_MAX : AtomicLoadWBinaryReg<z_atomic_loadw_max>;
+def ATOMIC_LOAD_MAX_32 : AtomicLoadBinaryReg32<atomic_load_max_32>;
+def ATOMIC_LOAD_MAX_64 : AtomicLoadBinaryReg64<atomic_load_max_64>;
+
+def ATOMIC_LOADW_UMIN : AtomicLoadWBinaryReg<z_atomic_loadw_umin>;
+def ATOMIC_LOAD_UMIN_32 : AtomicLoadBinaryReg32<atomic_load_umin_32>;
+def ATOMIC_LOAD_UMIN_64 : AtomicLoadBinaryReg64<atomic_load_umin_64>;
+
+def ATOMIC_LOADW_UMAX : AtomicLoadWBinaryReg<z_atomic_loadw_umax>;
+def ATOMIC_LOAD_UMAX_32 : AtomicLoadBinaryReg32<atomic_load_umax_32>;
+def ATOMIC_LOAD_UMAX_64 : AtomicLoadBinaryReg64<atomic_load_umax_64>;
+
+def ATOMIC_CMP_SWAPW
+ : Pseudo<(outs GR32:$dst), (ins bdaddr20only:$addr, GR32:$cmp, GR32:$swap,
+ ADDR32:$bitshift, ADDR32:$negbitshift,
+ uimm32:$bitsize),
+ [(set GR32:$dst,
+ (z_atomic_cmp_swapw bdaddr20only:$addr, GR32:$cmp, GR32:$swap,
+ ADDR32:$bitshift, ADDR32:$negbitshift,
+ uimm32:$bitsize))]> {
+ let Defs = [CC];
+ let mayLoad = 1;
+ let mayStore = 1;
+ let usesCustomInserter = 1;
+}
+
+let Defs = [CC] in {
+ defm CS : CmpSwapRSPair<"cs", 0xBA, 0xEB14, atomic_cmp_swap_32, GR32>;
+ def CSG : CmpSwapRSY<"csg", 0xEB30, atomic_cmp_swap_64, GR64>;
+}
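+
+// CS is normally wrapped in a retry loop, e.g. to add %r3 atomically to
+// the word at 0(%r4) (registers illustrative):
+//
+//         l     %r0, 0(%r4)       # fetch the old value
+// retry:  lr    %r1, %r0
+//         ar    %r1, %r3          # compute the new value
+//         cs    %r0, %r1, 0(%r4)  # store if unchanged; else reload %r0
+//         jl    retry             # CC1 means the compare failed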
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instructions.
+//===----------------------------------------------------------------------===//
+
+// Extract CC into bits 29 and 28 of a register.
+let Uses = [CC] in
+ def IPM : InherentRRE<"ipm", 0xB222, GR32, (z_ipm)>;
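+
+// For example, the sequence "ipm %r1; srl %r1, 28" leaves the raw CC value
+// (0-3) in %r1, since IPM also zeroes bits 30 and 31.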
+
+// Read a 32-bit access register into a GR32. As with all GR32 operations,
+// the upper 32 bits of the enclosing GR64 remain unchanged, which is useful
+// when a 64-bit address is stored in a pair of access registers.
+def EAR : InstRRE<0xB24F, (outs GR32:$R1), (ins access_reg:$R2),
+ "ear\t$R1, $R2",
+ [(set GR32:$R1, (z_extract_access access_reg:$R2))]>;
+
+// Find leftmost one, AKA count leading zeros. The instruction actually
+// returns a pair of GR64s, the first giving the number of leading zeros
+// and the second giving a copy of the source with the leftmost one bit
+// cleared. We only use the first result here.
+let Defs = [CC] in {
+ def FLOGR : UnaryRRE<"flog", 0xB983, null_frag, GR128, GR64>;
+}
+def : Pat<(ctlz GR64:$src),
+ (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_h64)>;
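+
+// For example, FLOGR applied to 0x0000800000000000 returns 16, matching
+// the ctlz result (__builtin_clzll in GCC/Clang terms); an input of zero
+// yields 64.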
+
+// Use subregs to populate the "don't care" bits in a 32-bit to 64-bit anyext.
+def : Pat<(i64 (anyext GR32:$src)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, subreg_l32)>;
+
+// Extend GR32s and GR64s to GR128s.
+let usesCustomInserter = 1 in {
+ def AEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
+ def ZEXT128_32 : Pseudo<(outs GR128:$dst), (ins GR32:$src), []>;
+ def ZEXT128_64 : Pseudo<(outs GR128:$dst), (ins GR64:$src), []>;
+}
+
+// Search a block of memory for a character.
+let mayLoad = 1, Defs = [CC], Uses = [R0L] in
+  defm SRST : StringRRE<"srst", 0xB25E, z_search_string>;
+
+//===----------------------------------------------------------------------===//
+// Peepholes.
+//===----------------------------------------------------------------------===//
+
+// Use AL* for GR64 additions of unsigned 32-bit values.
+defm : ZXB<add, GR64, ALGFR>;
+def : Pat<(add GR64:$src1, imm64zx32:$src2),
+ (ALGFI GR64:$src1, imm64zx32:$src2)>;
+def : Pat<(add GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
+ (ALGF GR64:$src1, bdxaddr20only:$addr)>;
+
+// Use SL* for GR64 subtractions of unsigned 32-bit values.
+defm : ZXB<sub, GR64, SLGFR>;
+def : Pat<(add GR64:$src1, imm64zx32n:$src2),
+ (SLGFI GR64:$src1, imm64zx32n:$src2)>;
+def : Pat<(sub GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
+ (SLGF GR64:$src1, bdxaddr20only:$addr)>;
+
+// Optimize sign-extended 1/0 selects to -1/0 selects. This is important
+// for vector legalization.
+def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid, uimm8zx4:$cc)),
+ (i32 31)),
+ (i32 31)),
+ (Select32 (LHI -1), (LHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
+def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid,
+ uimm8zx4:$cc)))),
+ (i32 63)),
+ (i32 63)),
+ (Select64 (LGHI -1), (LGHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
+
+// Peepholes for turning scalar operations into block operations.
+defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 1>;
+defm : BlockLoadStore<anyextloadi16, i32, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 2>;
+defm : BlockLoadStore<load, i32, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 4>;
+defm : BlockLoadStore<anyextloadi8, i64, MVCSequence, NCSequence,
+ OCSequence, XCSequence, 1>;
+defm : BlockLoadStore<anyextloadi16, i64, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 2>;
+defm : BlockLoadStore<anyextloadi32, i64, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 4>;
+defm : BlockLoadStore<load, i64, MVCSequence, NCSequence, OCSequence,
+ XCSequence, 8>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
new file mode 100644
index 000000000000..ba027d460440
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -0,0 +1,462 @@
+//===-- SystemZLongBranch.cpp - Branch lengthening for SystemZ ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass makes sure that all branches are in range. There are several ways
+// in which this could be done. One aggressive approach is to assume that all
+// branches are in range and successively replace those that turn out not
+// to be in range with a longer form (branch relaxation). A simple
+// implementation is to continually walk through the function relaxing
+// branches until no more changes are needed and a fixed point is reached.
+// However, in the pathological worst case, this implementation is
+// quadratic in the number of blocks; relaxing branch N can make branch N-1
+// go out of range, which in turn can make branch N-2 go out of range,
+// and so on.
+//
+// An alternative approach is to assume that all branches must be
+// converted to their long forms, then reinstate the short forms of
+// branches that, even under this pessimistic assumption, turn out to be
+// in range (branch shortening). This too can be implemented as a function
+// walk that is repeated until a fixed point is reached. In general,
+// the result of shortening is not as good as that of relaxation, and
+// shortening is also quadratic in the worst case; shortening branch N
+// can bring branch N-1 in range of the short form, which in turn can do
+// the same for branch N-2, and so on. The main advantage of shortening
+// is that each walk through the function produces valid code, so it is
+// possible to stop at any point after the first walk. The quadraticness
+// could therefore be handled with a maximum pass count, although the
+// question then becomes: what maximum count should be used?
+//
+// On SystemZ, long branches are only needed for functions bigger than 64k,
+// which are relatively rare to begin with, and the long branch sequences
+// are actually relatively cheap. It therefore doesn't seem worth spending
+// much compilation time on the problem. Instead, the approach we take is:
+//
+// (1) Work out the address that each block would have if no branches
+// need relaxing. Exit the pass early if all branches are in range
+// according to this assumption.
+//
+// (2) Work out the address that each block would have if all branches
+// need relaxing.
+//
+// (3) Walk through the blocks calculating the final address of each
+//     instruction
+// and relaxing those that need to be relaxed. For backward branches,
+// this check uses the final address of the target block, as calculated
+// earlier in the walk. For forward branches, this check uses the
+// address of the target block that was calculated in (2). Both checks
+// give a conservatively-correct range.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-long-branch"
+
+#include "SystemZTargetMachine.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+STATISTIC(LongBranches, "Number of long branches.");
+
+namespace {
+ // Represents positional information about a basic block.
+ struct MBBInfo {
+ // The address that we currently assume the block has.
+ uint64_t Address;
+
+ // The size of the block in bytes, excluding terminators.
+ // This value never changes.
+ uint64_t Size;
+
+ // The minimum alignment of the block, as a log2 value.
+ // This value never changes.
+ unsigned Alignment;
+
+ // The number of terminators in this block. This value never changes.
+ unsigned NumTerminators;
+
+ MBBInfo()
+ : Address(0), Size(0), Alignment(0), NumTerminators(0) {}
+ };
+
+ // Represents the state of a block terminator.
+ struct TerminatorInfo {
+ // If this terminator is a relaxable branch, this points to the branch
+ // instruction, otherwise it is null.
+ MachineInstr *Branch;
+
+ // The address that we currently assume the terminator has.
+ uint64_t Address;
+
+ // The current size of the terminator in bytes.
+ uint64_t Size;
+
+ // If Branch is nonnull, this is the number of the target block,
+ // otherwise it is unused.
+ unsigned TargetBlock;
+
+ // If Branch is nonnull, this is the length of the longest relaxed form,
+ // otherwise it is zero.
+ unsigned ExtraRelaxSize;
+
+    TerminatorInfo()
+      : Branch(0), Address(0), Size(0), TargetBlock(0), ExtraRelaxSize(0) {}
+ };
+
+ // Used to keep track of the current position while iterating over the blocks.
+ struct BlockPosition {
+ // The address that we assume this position has.
+ uint64_t Address;
+
+ // The number of low bits in Address that are known to be the same
+ // as the runtime address.
+ unsigned KnownBits;
+
+ BlockPosition(unsigned InitialAlignment)
+ : Address(0), KnownBits(InitialAlignment) {}
+ };
+
+ class SystemZLongBranch : public MachineFunctionPass {
+ public:
+ static char ID;
+ SystemZLongBranch(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(0) {}
+
+ virtual const char *getPassName() const {
+ return "SystemZ Long Branch";
+ }
+
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
+ void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
+ bool AssumeRelaxed);
+ TerminatorInfo describeTerminator(MachineInstr *MI);
+ uint64_t initMBBInfo();
+ bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
+ bool mustRelaxABranch();
+ void setWorstCaseAddresses();
+ void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode);
+ void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode);
+ void relaxBranch(TerminatorInfo &Terminator);
+ void relaxBranches();
+
+ const SystemZInstrInfo *TII;
+ MachineFunction *MF;
+ SmallVector<MBBInfo, 16> MBBs;
+ SmallVector<TerminatorInfo, 16> Terminators;
+ };
+
+ char SystemZLongBranch::ID = 0;
+
+ const uint64_t MaxBackwardRange = 0x10000;
+ const uint64_t MaxForwardRange = 0xfffe;
+} // end of anonymous namespace
+
+FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) {
+ return new SystemZLongBranch(TM);
+}
+
+// Position describes the state immediately before Block. Update Block
+// accordingly and move Position to the end of the block's non-terminator
+// instructions.
+void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
+ MBBInfo &Block) {
+ if (Block.Alignment > Position.KnownBits) {
+ // When calculating the address of Block, we need to conservatively
+ // assume that Block had the worst possible misalignment.
+ Position.Address += ((uint64_t(1) << Block.Alignment) -
+ (uint64_t(1) << Position.KnownBits));
+ Position.KnownBits = Block.Alignment;
+ }
+
+ // Align the addresses.
+ uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1;
+ Position.Address = (Position.Address + AlignMask) & ~AlignMask;
+
+ // Record the block's position.
+ Block.Address = Position.Address;
+
+ // Move past the non-terminators in the block.
+ Position.Address += Block.Size;
+}
+
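A worked example of the padding logic above may help. This is a standalone sketch, not part of the pass, and the helper name and inputs are made up: suppose the assumed address is 6 with only 1 trusted low bit (2-byte alignment known), and the next block needs 8-byte alignment. The worst-case step adds (1 << 3) - (1 << 1) = 6 bytes, and the mask then rounds 12 up to 16:

    #include <cstdint>
    #include <cstdio>

    // Standalone model of the address bookkeeping in skipNonTerminators.
    static uint64_t advanceToBlock(uint64_t Address, unsigned &KnownBits,
                                   unsigned BlockAlign) {
      if (BlockAlign > KnownBits) {
        // Conservatively assume the worst possible misalignment.
        Address += (uint64_t(1) << BlockAlign) - (uint64_t(1) << KnownBits);
        KnownBits = BlockAlign;
      }
      uint64_t AlignMask = (uint64_t(1) << BlockAlign) - 1;
      return (Address + AlignMask) & ~AlignMask;
    }

    int main() {
      unsigned KnownBits = 1;                        // 2-byte alignment known
      std::printf("%llu\n", (unsigned long long)
                  advanceToBlock(6, KnownBits, 3));  // prints 16
      return 0;
    }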
+// Position describes the state immediately before Terminator.
+// Update Terminator accordingly and move Position past it.
+// Assume that Terminator will be relaxed if AssumeRelaxed.
+void SystemZLongBranch::skipTerminator(BlockPosition &Position,
+ TerminatorInfo &Terminator,
+ bool AssumeRelaxed) {
+ Terminator.Address = Position.Address;
+ Position.Address += Terminator.Size;
+ if (AssumeRelaxed)
+ Position.Address += Terminator.ExtraRelaxSize;
+}
+
+// Return a description of terminator instruction MI.
+TerminatorInfo SystemZLongBranch::describeTerminator(MachineInstr *MI) {
+ TerminatorInfo Terminator;
+ Terminator.Size = TII->getInstSizeInBytes(MI);
+ if (MI->isConditionalBranch() || MI->isUnconditionalBranch()) {
+ switch (MI->getOpcode()) {
+ case SystemZ::J:
+ // Relaxes to JG, which is 2 bytes longer.
+ Terminator.ExtraRelaxSize = 2;
+ break;
+ case SystemZ::BRC:
+ // Relaxes to BRCL, which is 2 bytes longer.
+ Terminator.ExtraRelaxSize = 2;
+ break;
+ case SystemZ::BRCT:
+ case SystemZ::BRCTG:
+ // Relaxes to A(G)HI and BRCL, which is 6 bytes longer.
+ Terminator.ExtraRelaxSize = 6;
+ break;
+ case SystemZ::CRJ:
+ case SystemZ::CLRJ:
+ // Relaxes to a C(L)R/BRCL sequence, which is 2 bytes longer.
+ Terminator.ExtraRelaxSize = 2;
+ break;
+ case SystemZ::CGRJ:
+ case SystemZ::CLGRJ:
+ // Relaxes to a C(L)GR/BRCL sequence, which is 4 bytes longer.
+ Terminator.ExtraRelaxSize = 4;
+ break;
+ case SystemZ::CIJ:
+ case SystemZ::CGIJ:
+ // Relaxes to a C(G)HI/BRCL sequence, which is 4 bytes longer.
+ Terminator.ExtraRelaxSize = 4;
+ break;
+ case SystemZ::CLIJ:
+ case SystemZ::CLGIJ:
+ // Relaxes to a CL(G)FI/BRCL sequence, which is 6 bytes longer.
+ Terminator.ExtraRelaxSize = 6;
+ break;
+ default:
+ llvm_unreachable("Unrecognized branch instruction");
+ }
+ Terminator.Branch = MI;
+ Terminator.TargetBlock =
+ TII->getBranchInfo(MI).Target->getMBB()->getNumber();
+ }
+ return Terminator;
+}
+
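The ExtraRelaxSize values above follow from the instruction encoding sizes (RR is 2 bytes, RI is 4, RIL and RIE are 6). A standalone sanity check of that arithmetic, outside the backend:

    #include <cstdio>

    // Short form size vs. relaxed form size; the deltas match ExtraRelaxSize.
    int main() {
      struct { const char *From, *To; int Short, Long; } T[] = {
        {"J",    "JG",         4, 6},      // +2
        {"BRC",  "BRCL",       4, 6},      // +2
        {"BRCT", "AHI; BRCL",  4, 4 + 6},  // +6
        {"CRJ",  "CR; BRCL",   6, 2 + 6},  // +2
        {"CIJ",  "CHI; BRCL",  6, 4 + 6},  // +4
        {"CLIJ", "CLFI; BRCL", 6, 6 + 6},  // +6
      };
      for (const auto &E : T)
        std::printf("%-5s -> %-10s: +%d bytes\n", E.From, E.To,
                    E.Long - E.Short);
      return 0;
    }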
+// Fill MBBs and Terminators, setting the addresses on the assumption
+// that no branches need relaxation. Return the size of the function under
+// this assumption.
+uint64_t SystemZLongBranch::initMBBInfo() {
+ MF->RenumberBlocks();
+ unsigned NumBlocks = MF->size();
+
+ MBBs.clear();
+ MBBs.resize(NumBlocks);
+
+ Terminators.clear();
+ Terminators.reserve(NumBlocks);
+
+ BlockPosition Position(MF->getAlignment());
+ for (unsigned I = 0; I < NumBlocks; ++I) {
+ MachineBasicBlock *MBB = MF->getBlockNumbered(I);
+ MBBInfo &Block = MBBs[I];
+
+ // Record the alignment, for quick access.
+ Block.Alignment = MBB->getAlignment();
+
+ // Calculate the size of the fixed part of the block.
+ MachineBasicBlock::iterator MI = MBB->begin();
+ MachineBasicBlock::iterator End = MBB->end();
+ while (MI != End && !MI->isTerminator()) {
+ Block.Size += TII->getInstSizeInBytes(MI);
+ ++MI;
+ }
+ skipNonTerminators(Position, Block);
+
+ // Add the terminators.
+ while (MI != End) {
+ if (!MI->isDebugValue()) {
+ assert(MI->isTerminator() && "Terminator followed by non-terminator");
+ Terminators.push_back(describeTerminator(MI));
+ skipTerminator(Position, Terminators.back(), false);
+ ++Block.NumTerminators;
+ }
+ ++MI;
+ }
+ }
+
+ return Position.Address;
+}
+
+// Return true if, under current assumptions, Terminator would need to be
+// relaxed if it were placed at address Address.
+bool SystemZLongBranch::mustRelaxBranch(const TerminatorInfo &Terminator,
+ uint64_t Address) {
+ if (!Terminator.Branch)
+ return false;
+
+ const MBBInfo &Target = MBBs[Terminator.TargetBlock];
+ if (Address >= Target.Address) {
+ if (Address - Target.Address <= MaxBackwardRange)
+ return false;
+ } else {
+ if (Target.Address - Address <= MaxForwardRange)
+ return false;
+ }
+
+ return true;
+}
+
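To make the asymmetry of the two range constants concrete, here is the same test with plain integers (a sketch, not the pass itself): a backward branch of 0x10000 bytes is still in range, while a forward branch of 0x10000 bytes is 2 bytes too far.

    #include <cstdint>
    #include <cstdio>

    static bool mustRelax(uint64_t BranchAddr, uint64_t TargetAddr) {
      const uint64_t MaxBackwardRange = 0x10000; // -32768 halfwords
      const uint64_t MaxForwardRange = 0xfffe;   // +32767 halfwords
      if (BranchAddr >= TargetAddr)
        return BranchAddr - TargetAddr > MaxBackwardRange;
      return TargetAddr - BranchAddr > MaxForwardRange;
    }

    int main() {
      std::printf("%d\n", mustRelax(0x20000, 0x10000)); // 0: just in range
      std::printf("%d\n", mustRelax(0x10000, 0x20000)); // 1: 2 bytes too far
      return 0;
    }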
+// Return true if, under current assumptions, any terminator needs
+// to be relaxed.
+bool SystemZLongBranch::mustRelaxABranch() {
+ for (SmallVectorImpl<TerminatorInfo>::iterator TI = Terminators.begin(),
+ TE = Terminators.end(); TI != TE; ++TI)
+ if (mustRelaxBranch(*TI, TI->Address))
+ return true;
+ return false;
+}
+
+// Set the address of each block on the assumption that all branches
+// must be long.
+void SystemZLongBranch::setWorstCaseAddresses() {
+ SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
+ BlockPosition Position(MF->getAlignment());
+ for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
+ BI != BE; ++BI) {
+ skipNonTerminators(Position, *BI);
+ for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ skipTerminator(Position, *TI, true);
+ ++TI;
+ }
+ }
+}
+
+// Split BRANCH ON COUNT MI into the addition given by AddOpcode followed
+// by a BRCL on the result.
+void SystemZLongBranch::splitBranchOnCount(MachineInstr *MI,
+ unsigned AddOpcode) {
+ MachineBasicBlock *MBB = MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(*MBB, MI, DL, TII->get(AddOpcode))
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1))
+ .addImm(-1);
+ MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addImm(SystemZ::CCMASK_CMP_NE)
+ .addOperand(MI->getOperand(2));
+ // The implicit use of CC is a killing use.
+ BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ MI->eraseFromParent();
+}
+
+// Split MI into the comparison given by CompareOpcode followed
+// by a BRCL on the result.
+void SystemZLongBranch::splitCompareBranch(MachineInstr *MI,
+ unsigned CompareOpcode) {
+ MachineBasicBlock *MBB = MI->getParent();
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI(*MBB, MI, DL, TII->get(CompareOpcode))
+ .addOperand(MI->getOperand(0))
+ .addOperand(MI->getOperand(1));
+ MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
+ .addImm(SystemZ::CCMASK_ICMP)
+ .addOperand(MI->getOperand(2))
+ .addOperand(MI->getOperand(3));
+ // The implicit use of CC is a killing use.
+ BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
+ MI->eraseFromParent();
+}
+
+// Relax the branch described by Terminator.
+void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
+ MachineInstr *Branch = Terminator.Branch;
+ switch (Branch->getOpcode()) {
+ case SystemZ::J:
+ Branch->setDesc(TII->get(SystemZ::JG));
+ break;
+ case SystemZ::BRC:
+ Branch->setDesc(TII->get(SystemZ::BRCL));
+ break;
+ case SystemZ::BRCT:
+ splitBranchOnCount(Branch, SystemZ::AHI);
+ break;
+ case SystemZ::BRCTG:
+ splitBranchOnCount(Branch, SystemZ::AGHI);
+ break;
+ case SystemZ::CRJ:
+ splitCompareBranch(Branch, SystemZ::CR);
+ break;
+ case SystemZ::CGRJ:
+ splitCompareBranch(Branch, SystemZ::CGR);
+ break;
+ case SystemZ::CIJ:
+ splitCompareBranch(Branch, SystemZ::CHI);
+ break;
+ case SystemZ::CGIJ:
+ splitCompareBranch(Branch, SystemZ::CGHI);
+ break;
+ case SystemZ::CLRJ:
+ splitCompareBranch(Branch, SystemZ::CLR);
+ break;
+ case SystemZ::CLGRJ:
+ splitCompareBranch(Branch, SystemZ::CLGR);
+ break;
+ case SystemZ::CLIJ:
+ splitCompareBranch(Branch, SystemZ::CLFI);
+ break;
+ case SystemZ::CLGIJ:
+ splitCompareBranch(Branch, SystemZ::CLGFI);
+ break;
+ default:
+ llvm_unreachable("Unrecognized branch");
+ }
+
+ Terminator.Size += Terminator.ExtraRelaxSize;
+ Terminator.ExtraRelaxSize = 0;
+ Terminator.Branch = 0;
+
+ ++LongBranches;
+}
+
+// Run a shortening pass and relax any branches that need to be relaxed.
+void SystemZLongBranch::relaxBranches() {
+ SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
+ BlockPosition Position(MF->getAlignment());
+ for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
+ BI != BE; ++BI) {
+ skipNonTerminators(Position, *BI);
+ for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ assert(Position.Address <= TI->Address &&
+ "Addresses shouldn't go forwards");
+ if (mustRelaxBranch(*TI, Position.Address))
+ relaxBranch(*TI);
+ skipTerminator(Position, *TI, false);
+ ++TI;
+ }
+ }
+}
+
+bool SystemZLongBranch::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
+ MF = &F;
+ uint64_t Size = initMBBInfo();
+ if (Size <= MaxForwardRange || !mustRelaxABranch())
+ return false;
+
+ setWorstCaseAddresses();
+ relaxBranches();
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
new file mode 100644
index 000000000000..ff9a6c0a221f
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
@@ -0,0 +1,100 @@
+//===-- SystemZMCInstLower.cpp - Lower MachineInstr to MCInst -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZMCInstLower.h"
+#include "SystemZAsmPrinter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Target/Mangler.h"
+
+using namespace llvm;
+
+// Return the VK_* enumeration for MachineOperand target flags Flags.
+static MCSymbolRefExpr::VariantKind getVariantKind(unsigned Flags) {
+ switch (Flags & SystemZII::MO_SYMBOL_MODIFIER) {
+ case 0:
+ return MCSymbolRefExpr::VK_None;
+ case SystemZII::MO_GOT:
+ return MCSymbolRefExpr::VK_GOT;
+ }
+ llvm_unreachable("Unrecognised MO_ACCESS_MODEL");
+}
+
+SystemZMCInstLower::SystemZMCInstLower(MCContext &ctx,
+ SystemZAsmPrinter &asmprinter)
+ : Ctx(ctx), AsmPrinter(asmprinter) {}
+
+const MCExpr *
+SystemZMCInstLower::getExpr(const MachineOperand &MO,
+ MCSymbolRefExpr::VariantKind Kind) const {
+ const MCSymbol *Symbol;
+ bool HasOffset = true;
+ switch (MO.getType()) {
+ case MachineOperand::MO_MachineBasicBlock:
+ Symbol = MO.getMBB()->getSymbol();
+ HasOffset = false;
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ Symbol = AsmPrinter.getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_ExternalSymbol:
+ Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName());
+ break;
+
+ case MachineOperand::MO_JumpTableIndex:
+ Symbol = AsmPrinter.GetJTISymbol(MO.getIndex());
+ HasOffset = false;
+ break;
+
+ case MachineOperand::MO_ConstantPoolIndex:
+ Symbol = AsmPrinter.GetCPISymbol(MO.getIndex());
+ break;
+
+ case MachineOperand::MO_BlockAddress:
+ Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress());
+ break;
+
+ default:
+ llvm_unreachable("unknown operand type");
+ }
+ const MCExpr *Expr = MCSymbolRefExpr::Create(Symbol, Kind, Ctx);
+ if (HasOffset)
+ if (int64_t Offset = MO.getOffset()) {
+ const MCExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
+ Expr = MCBinaryExpr::CreateAdd(Expr, OffsetExpr, Ctx);
+ }
+ return Expr;
+}
+
+MCOperand SystemZMCInstLower::lowerOperand(const MachineOperand &MO) const {
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ return MCOperand::CreateReg(MO.getReg());
+
+ case MachineOperand::MO_Immediate:
+ return MCOperand::CreateImm(MO.getImm());
+
+ default: {
+ MCSymbolRefExpr::VariantKind Kind = getVariantKind(MO.getTargetFlags());
+ return MCOperand::CreateExpr(getExpr(MO, Kind));
+ }
+ }
+}
+
+void SystemZMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+ for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
+ const MachineOperand &MO = MI->getOperand(I);
+ // Ignore all implicit register operands.
+ if (!MO.isReg() || !MO.isImplicit())
+ OutMI.addOperand(lowerOperand(MO));
+ }
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
new file mode 100644
index 000000000000..f6d5ac8c285d
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
@@ -0,0 +1,44 @@
+//===-- SystemZMCInstLower.h - Lower MachineInstr to MCInst ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SYSTEMZMCINSTLOWER_H
+#define LLVM_SYSTEMZMCINSTLOWER_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCInst;
+class MCOperand;
+class MachineInstr;
+class MachineOperand;
+class Mangler;
+class SystemZAsmPrinter;
+
+class LLVM_LIBRARY_VISIBILITY SystemZMCInstLower {
+ MCContext &Ctx;
+ SystemZAsmPrinter &AsmPrinter;
+
+public:
+ SystemZMCInstLower(MCContext &ctx, SystemZAsmPrinter &asmPrinter);
+
+ // Lower MachineInstr MI to MCInst OutMI.
+ void lower(const MachineInstr *MI, MCInst &OutMI) const;
+
+ // Return an MCOperand for MO.
+ MCOperand lowerOperand(const MachineOperand& MO) const;
+
+ // Return an MCExpr for symbolic operand MO with variant kind Kind.
+ const MCExpr *getExpr(const MachineOperand &MO,
+ MCSymbolRefExpr::VariantKind Kind) const;
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp
new file mode 100644
index 000000000000..00572d0b9d79
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.cpp
@@ -0,0 +1,17 @@
+//== SystemZMachineFunctionInfo.cpp - SystemZ machine function info -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZMachineFunctionInfo.h"
+
+using namespace llvm;
+
+// Pin the vtable to this file.
+void SystemZMachineFunctionInfo::anchor() {}
+
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
new file mode 100644
index 000000000000..845291f4f3fa
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -0,0 +1,68 @@
+//==- SystemZMachineFunctionInfo.h - SystemZ machine function info -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZMACHINEFUNCTIONINFO_H
+#define SYSTEMZMACHINEFUNCTIONINFO_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+class SystemZMachineFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
+ unsigned LowSavedGPR;
+ unsigned HighSavedGPR;
+ unsigned VarArgsFirstGPR;
+ unsigned VarArgsFirstFPR;
+ unsigned VarArgsFrameIndex;
+ unsigned RegSaveFrameIndex;
+ bool ManipulatesSP;
+
+public:
+ explicit SystemZMachineFunctionInfo(MachineFunction &MF)
+ : LowSavedGPR(0), HighSavedGPR(0), VarArgsFirstGPR(0), VarArgsFirstFPR(0),
+ VarArgsFrameIndex(0), RegSaveFrameIndex(0), ManipulatesSP(false) {}
+
+ // Get and set the first call-saved GPR that should be saved and restored
+ // by this function. This is 0 if no GPRs need to be saved or restored.
+ unsigned getLowSavedGPR() const { return LowSavedGPR; }
+ void setLowSavedGPR(unsigned Reg) { LowSavedGPR = Reg; }
+
+ // Get and set the last call-saved GPR that should be saved and restored
+ // by this function.
+ unsigned getHighSavedGPR() const { return HighSavedGPR; }
+ void setHighSavedGPR(unsigned Reg) { HighSavedGPR = Reg; }
+
+ // Get and set the number of fixed (as opposed to variable) arguments
+ // that are passed in GPRs to this function.
+ unsigned getVarArgsFirstGPR() const { return VarArgsFirstGPR; }
+ void setVarArgsFirstGPR(unsigned GPR) { VarArgsFirstGPR = GPR; }
+
+ // Likewise FPRs.
+ unsigned getVarArgsFirstFPR() const { return VarArgsFirstFPR; }
+ void setVarArgsFirstFPR(unsigned FPR) { VarArgsFirstFPR = FPR; }
+
+ // Get and set the frame index of the first stack vararg.
+ unsigned getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+ void setVarArgsFrameIndex(unsigned FI) { VarArgsFrameIndex = FI; }
+
+ // Get and set the frame index of the register save area
+ // (i.e. the incoming stack pointer).
+ unsigned getRegSaveFrameIndex() const { return RegSaveFrameIndex; }
+ void setRegSaveFrameIndex(unsigned FI) { RegSaveFrameIndex = FI; }
+
+ // Get and set whether the function directly manipulates the stack pointer,
+ // e.g. through STACKSAVE or STACKRESTORE.
+ bool getManipulatesSP() const { return ManipulatesSP; }
+ void setManipulatesSP(bool MSP) { ManipulatesSP = MSP; }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
new file mode 100644
index 000000000000..3ad146c57d92
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
@@ -0,0 +1,482 @@
+//===-- SystemZOperands.td - SystemZ instruction operands ----*- tblgen-*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Class definitions
+//===----------------------------------------------------------------------===//
+
+class ImmediateAsmOperand<string name>
+ : AsmOperandClass {
+ let Name = name;
+ let RenderMethod = "addImmOperands";
+}
+
+// Constructs both a DAG pattern and instruction operand for an immediate
+// of type VT. PRED returns true if a node is acceptable and XFORM returns
+// the operand value associated with the node. ASMOP is the name of the
+// associated asm operand, and also forms the basis of the asm print method.
+class Immediate<ValueType vt, code pred, SDNodeXForm xform, string asmop>
+ : PatLeaf<(vt imm), pred, xform>, Operand<vt> {
+ let PrintMethod = "print"##asmop##"Operand";
+ let DecoderMethod = "decode"##asmop##"Operand";
+ let ParserMatchClass = !cast<AsmOperandClass>(asmop);
+}
+
+// Constructs an asm operand for a PC-relative address. SIZE says how
+// many bits there are.
+class PCRelAsmOperand<string size> : ImmediateAsmOperand<"PCRel"##size> {
+ let PredicateMethod = "isImm";
+ let ParserMethod = "parsePCRel"##size;
+}
+
+// Constructs an operand for a PC-relative address with address type VT.
+// ASMOP is the associated asm operand.
+class PCRelOperand<ValueType vt, AsmOperandClass asmop> : Operand<vt> {
+ let PrintMethod = "printPCRelOperand";
+ let ParserMatchClass = asmop;
+}
+
+// Constructs both a DAG pattern and instruction operand for a PC-relative
+// address with address type VT. SELF is the name of the operand and
+// ASMOP is the associated asm operand.
+class PCRelAddress<ValueType vt, string self, AsmOperandClass asmop>
+ : ComplexPattern<vt, 1, "selectPCRelAddress",
+ [z_pcrel_wrapper, z_pcrel_offset]>,
+ PCRelOperand<vt, asmop> {
+ let MIOperandInfo = (ops !cast<Operand>(self));
+}
+
+// Constructs an AsmOperandClass for addressing mode FORMAT, treating the
+// registers as having BITSIZE bits and displacements as having DISPSIZE bits.
+// LENGTH is "LenN" for addresses with an N-bit length field, otherwise it
+// is "".
+class AddressAsmOperand<string format, string bitsize, string dispsize,
+ string length = "">
+ : AsmOperandClass {
+ let Name = format##bitsize##"Disp"##dispsize##length;
+ let ParserMethod = "parse"##format##bitsize;
+ let RenderMethod = "add"##format##"Operands";
+}
+
+// Constructs both a DAG pattern and instruction operand for an addressing mode.
+// FORMAT, BITSIZE, DISPSIZE and LENGTH are the parameters to an associated
+// AddressAsmOperand. OPERANDS is a list of NUMOPS individual operands
+// (base register, displacement, etc.). SELTYPE is the type of the memory
+// operand for selection purposes; sometimes we want different selection
+// choices for the same underlying addressing mode. SUFFIX is similarly
+// a suffix appended to the displacement for selection purposes;
+// e.g. we want to reject small 20-bit displacements if a 12-bit form
+// also exists, but we want to accept them otherwise.
+class AddressingMode<string seltype, string bitsize, string dispsize,
+ string suffix, string length, int numops, string format,
+ dag operands>
+ : ComplexPattern<!cast<ValueType>("i"##bitsize), numops,
+ "select"##seltype##dispsize##suffix##length,
+ [add, sub, or, frameindex, z_adjdynalloc]>,
+ Operand<!cast<ValueType>("i"##bitsize)> {
+ let PrintMethod = "print"##format##"Operand";
+ let EncoderMethod = "get"##format##dispsize##length##"Encoding";
+ let DecoderMethod =
+ "decode"##format##bitsize##"Disp"##dispsize##length##"Operand";
+ let MIOperandInfo = operands;
+ let ParserMatchClass =
+ !cast<AddressAsmOperand>(format##bitsize##"Disp"##dispsize##length);
+}
+
+// An addressing mode with a base and displacement but no index.
+class BDMode<string type, string bitsize, string dispsize, string suffix>
+ : AddressingMode<type, bitsize, dispsize, suffix, "", 2, "BDAddr",
+ (ops !cast<RegisterOperand>("ADDR"##bitsize),
+ !cast<Immediate>("disp"##dispsize##"imm"##bitsize))>;
+
+// An addressing mode with a base, displacement and index.
+class BDXMode<string type, string bitsize, string dispsize, string suffix>
+ : AddressingMode<type, bitsize, dispsize, suffix, "", 3, "BDXAddr",
+ (ops !cast<RegisterOperand>("ADDR"##bitsize),
+ !cast<Immediate>("disp"##dispsize##"imm"##bitsize),
+ !cast<RegisterOperand>("ADDR"##bitsize))>;
+
+// A BDMode paired with an immediate length operand of LENSIZE bits.
+class BDLMode<string type, string bitsize, string dispsize, string suffix,
+ string lensize>
+ : AddressingMode<type, bitsize, dispsize, suffix, "Len"##lensize, 3,
+ "BDLAddr",
+ (ops !cast<RegisterOperand>("ADDR"##bitsize),
+ !cast<Immediate>("disp"##dispsize##"imm"##bitsize),
+ !cast<Immediate>("imm"##bitsize))>;
+
+//===----------------------------------------------------------------------===//
+// Extracting immediate operands from nodes
+// These all create MVT::i64 nodes to ensure the value is not sign-extended
+// when converted from an SDNode to a MachineOperand later on.
+//===----------------------------------------------------------------------===//
+
+// Bits 0-15 (counting from the lsb).
+def LL16 : SDNodeXForm<imm, [{
+ uint64_t Value = N->getZExtValue() & 0x000000000000FFFFULL;
+ return CurDAG->getTargetConstant(Value, MVT::i64);
+}]>;
+
+// Bits 16-31 (counting from the lsb).
+def LH16 : SDNodeXForm<imm, [{
+ uint64_t Value = (N->getZExtValue() & 0x00000000FFFF0000ULL) >> 16;
+ return CurDAG->getTargetConstant(Value, MVT::i64);
+}]>;
+
+// Bits 32-47 (counting from the lsb).
+def HL16 : SDNodeXForm<imm, [{
+ uint64_t Value = (N->getZExtValue() & 0x0000FFFF00000000ULL) >> 32;
+ return CurDAG->getTargetConstant(Value, MVT::i64);
+}]>;
+
+// Bits 48-63 (counting from the lsb).
+def HH16 : SDNodeXForm<imm, [{
+ uint64_t Value = (N->getZExtValue() & 0xFFFF000000000000ULL) >> 48;
+ return CurDAG->getTargetConstant(Value, MVT::i64);
+}]>;
+
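A quick standalone check of these four transforms, splitting a 64-bit constant into the 16-bit fields consumed by SystemZ's 16-bit-immediate instructions (the variable names here are made up):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t Imm = 0x123456789ABCDEF0ULL;
      uint64_t LL = Imm & 0xFFFFULL;          // bits 0-15  -> 0xdef0
      uint64_t LH = (Imm >> 16) & 0xFFFFULL;  // bits 16-31 -> 0x9abc
      uint64_t HL = (Imm >> 32) & 0xFFFFULL;  // bits 32-47 -> 0x5678
      uint64_t HH = (Imm >> 48) & 0xFFFFULL;  // bits 48-63 -> 0x1234
      std::printf("%llx %llx %llx %llx\n", (unsigned long long)LL,
                  (unsigned long long)LH, (unsigned long long)HL,
                  (unsigned long long)HH);
      return 0;
    }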
+// Low 32 bits.
+def LF32 : SDNodeXForm<imm, [{
+ uint64_t Value = N->getZExtValue() & 0x00000000FFFFFFFFULL;
+ return CurDAG->getTargetConstant(Value, MVT::i64);
+}]>;
+
+// High 32 bits.
+def HF32 : SDNodeXForm<imm, [{
+ uint64_t Value = N->getZExtValue() >> 32;
+ return CurDAG->getTargetConstant(Value, MVT::i64);
+}]>;
+
+// Truncate an immediate to an 8-bit signed quantity.
+def SIMM8 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(int8_t(N->getZExtValue()), MVT::i64);
+}]>;
+
+// Truncate an immediate to an 8-bit unsigned quantity.
+def UIMM8 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(uint8_t(N->getZExtValue()), MVT::i64);
+}]>;
+
+// Truncate an immediate to a 16-bit signed quantity.
+def SIMM16 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(int16_t(N->getZExtValue()), MVT::i64);
+}]>;
+
+// Truncate an immediate to a 16-bit unsigned quantity.
+def UIMM16 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(uint16_t(N->getZExtValue()), MVT::i64);
+}]>;
+
+// Truncate an immediate to a 32-bit signed quantity.
+def SIMM32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(int32_t(N->getZExtValue()), MVT::i64);
+}]>;
+
+// Truncate an immediate to a 32-bit unsigned quantity.
+def UIMM32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(uint32_t(N->getZExtValue()), MVT::i64);
+}]>;
+
+// Negate and then truncate an immediate to a 32-bit unsigned quantity.
+def NEGIMM32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(uint32_t(-N->getZExtValue()), MVT::i64);
+}]>;
+
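NEGIMM32 exists so that adding a negative constant can be selected as a subtraction whose negated value fits an unsigned 32-bit immediate, as SystemZ's subtract-logical-immediate instructions take. A small sketch of the arithmetic under that assumption (nothing here is backend code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t N = -12345;              // the constant matched by imm64zx32n
      uint32_t NegImm = uint32_t(-N);  // what NEGIMM32 hands the instruction
      int64_t X = 100000;
      // X + N and X - NegImm agree whenever -N fits in 32 unsigned bits.
      std::printf("%lld %lld\n", (long long)(X + N),
                  (long long)(X - (int64_t)NegImm)); // prints 87655 87655
      return 0;
    }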
+//===----------------------------------------------------------------------===//
+// Immediate asm operands.
+//===----------------------------------------------------------------------===//
+
+def U4Imm : ImmediateAsmOperand<"U4Imm">;
+def U6Imm : ImmediateAsmOperand<"U6Imm">;
+def S8Imm : ImmediateAsmOperand<"S8Imm">;
+def U8Imm : ImmediateAsmOperand<"U8Imm">;
+def S16Imm : ImmediateAsmOperand<"S16Imm">;
+def U16Imm : ImmediateAsmOperand<"U16Imm">;
+def S32Imm : ImmediateAsmOperand<"S32Imm">;
+def U32Imm : ImmediateAsmOperand<"U32Imm">;
+
+//===----------------------------------------------------------------------===//
+// 8-bit immediates
+//===----------------------------------------------------------------------===//
+
+def uimm8zx4 : Immediate<i8, [{
+ return isUInt<4>(N->getZExtValue());
+}], NOOP_SDNodeXForm, "U4Imm">;
+
+def uimm8zx6 : Immediate<i8, [{
+ return isUInt<6>(N->getZExtValue());
+}], NOOP_SDNodeXForm, "U6Imm">;
+
+def simm8 : Immediate<i8, [{}], SIMM8, "S8Imm">;
+def uimm8 : Immediate<i8, [{}], UIMM8, "U8Imm">;
+
+//===----------------------------------------------------------------------===//
+// i32 immediates
+//===----------------------------------------------------------------------===//
+
+// Immediates for the lower and upper 16 bits of an i32, with the other
+// bits of the i32 being zero.
+def imm32ll16 : Immediate<i32, [{
+ return SystemZ::isImmLL(N->getZExtValue());
+}], LL16, "U16Imm">;
+
+def imm32lh16 : Immediate<i32, [{
+ return SystemZ::isImmLH(N->getZExtValue());
+}], LH16, "U16Imm">;
+
+// Immediates for the lower and upper 16 bits of an i32, with the other
+// bits of the i32 being one.
+def imm32ll16c : Immediate<i32, [{
+ return SystemZ::isImmLL(uint32_t(~N->getZExtValue()));
+}], LL16, "U16Imm">;
+
+def imm32lh16c : Immediate<i32, [{
+ return SystemZ::isImmLH(uint32_t(~N->getZExtValue()));
+}], LH16, "U16Imm">;
+
+// Short immediates.
+def imm32sx8 : Immediate<i32, [{
+ return isInt<8>(N->getSExtValue());
+}], SIMM8, "S8Imm">;
+
+def imm32zx8 : Immediate<i32, [{
+ return isUInt<8>(N->getZExtValue());
+}], UIMM8, "U8Imm">;
+
+def imm32zx8trunc : Immediate<i32, [{}], UIMM8, "U8Imm">;
+
+def imm32sx16 : Immediate<i32, [{
+ return isInt<16>(N->getSExtValue());
+}], SIMM16, "S16Imm">;
+
+def imm32zx16 : Immediate<i32, [{
+ return isUInt<16>(N->getZExtValue());
+}], UIMM16, "U16Imm">;
+
+def imm32sx16trunc : Immediate<i32, [{}], SIMM16, "S16Imm">;
+
+// Full 32-bit immediates. We need both signed and unsigned versions
+// because the assembler is picky. E.g. AFI requires signed operands
+// while NILF requires unsigned ones.
+def simm32 : Immediate<i32, [{}], SIMM32, "S32Imm">;
+def uimm32 : Immediate<i32, [{}], UIMM32, "U32Imm">;
+
+def imm32 : ImmLeaf<i32, [{}]>;
+
+//===----------------------------------------------------------------------===//
+// 64-bit immediates
+//===----------------------------------------------------------------------===//
+
+// Immediates for 16-bit chunks of an i64, with the other bits of the
+// i64 being zero.
+def imm64ll16 : Immediate<i64, [{
+ return SystemZ::isImmLL(N->getZExtValue());
+}], LL16, "U16Imm">;
+
+def imm64lh16 : Immediate<i64, [{
+ return SystemZ::isImmLH(N->getZExtValue());
+}], LH16, "U16Imm">;
+
+def imm64hl16 : Immediate<i64, [{
+ return SystemZ::isImmHL(N->getZExtValue());
+}], HL16, "U16Imm">;
+
+def imm64hh16 : Immediate<i64, [{
+ return SystemZ::isImmHH(N->getZExtValue());
+}], HH16, "U16Imm">;
+
+// Immediates for 16-bit chunks of an i64, with the other bits of the
+// i64 being one.
+def imm64ll16c : Immediate<i64, [{
+ return SystemZ::isImmLL(uint64_t(~N->getZExtValue()));
+}], LL16, "U16Imm">;
+
+def imm64lh16c : Immediate<i64, [{
+ return SystemZ::isImmLH(uint64_t(~N->getZExtValue()));
+}], LH16, "U16Imm">;
+
+def imm64hl16c : Immediate<i64, [{
+ return SystemZ::isImmHL(uint64_t(~N->getZExtValue()));
+}], HL16, "U16Imm">;
+
+def imm64hh16c : Immediate<i64, [{
+ return SystemZ::isImmHH(uint64_t(~N->getZExtValue()));
+}], HH16, "U16Imm">;
+
+// Immediates for the lower and upper 32 bits of an i64, with the other
+// bits of the i64 being zero.
+def imm64lf32 : Immediate<i64, [{
+ return SystemZ::isImmLF(N->getZExtValue());
+}], LF32, "U32Imm">;
+
+def imm64hf32 : Immediate<i64, [{
+ return SystemZ::isImmHF(N->getZExtValue());
+}], HF32, "U32Imm">;
+
+// Immediates for the lower and upper 32 bits of an i64, with the other
+// bits of the i64 being one.
+def imm64lf32c : Immediate<i64, [{
+ return SystemZ::isImmLF(uint64_t(~N->getZExtValue()));
+}], LF32, "U32Imm">;
+
+def imm64hf32c : Immediate<i64, [{
+ return SystemZ::isImmHF(uint64_t(~N->getZExtValue()));
+}], HF32, "U32Imm">;
+
+// Short immediates.
+def imm64sx8 : Immediate<i64, [{
+ return isInt<8>(N->getSExtValue());
+}], SIMM8, "S8Imm">;
+
+def imm64zx8 : Immediate<i64, [{
+ return isUInt<8>(N->getSExtValue());
+}], UIMM8, "U8Imm">;
+
+def imm64sx16 : Immediate<i64, [{
+ return isInt<16>(N->getSExtValue());
+}], SIMM16, "S16Imm">;
+
+def imm64zx16 : Immediate<i64, [{
+ return isUInt<16>(N->getZExtValue());
+}], UIMM16, "U16Imm">;
+
+def imm64sx32 : Immediate<i64, [{
+ return isInt<32>(N->getSExtValue());
+}], SIMM32, "S32Imm">;
+
+def imm64zx32 : Immediate<i64, [{
+ return isUInt<32>(N->getZExtValue());
+}], UIMM32, "U32Imm">;
+
+def imm64zx32n : Immediate<i64, [{
+ return isUInt<32>(-N->getSExtValue());
+}], NEGIMM32, "U32Imm">;
+
+def imm64 : ImmLeaf<i64, [{}]>, Operand<i64>;
+
+//===----------------------------------------------------------------------===//
+// Floating-point immediates
+//===----------------------------------------------------------------------===//
+
+// Floating-point zero.
+def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+
+// Floating-point negative zero.
+def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
+
+//===----------------------------------------------------------------------===//
+// Symbolic address operands
+//===----------------------------------------------------------------------===//
+
+// PC-relative asm operands.
+def PCRel16 : PCRelAsmOperand<"16">;
+def PCRel32 : PCRelAsmOperand<"32">;
+
+// PC-relative offsets of a basic block. The offset is sign-extended
+// and multiplied by 2.
+def brtarget16 : PCRelOperand<OtherVT, PCRel16> {
+ let EncoderMethod = "getPC16DBLEncoding";
+ let DecoderMethod = "decodePC16DBLOperand";
+}
+def brtarget32 : PCRelOperand<OtherVT, PCRel32> {
+ let EncoderMethod = "getPC32DBLEncoding";
+ let DecoderMethod = "decodePC32DBLOperand";
+}
+
+// A PC-relative offset of a global value. The offset is sign-extended
+// and multiplied by 2.
+def pcrel32 : PCRelAddress<i64, "pcrel32", PCRel32> {
+ let EncoderMethod = "getPC32DBLEncoding";
+ let DecoderMethod = "decodePC32DBLOperand";
+}
+
+//===----------------------------------------------------------------------===//
+// Addressing modes
+//===----------------------------------------------------------------------===//
+
+// 12-bit displacement operands.
+def disp12imm32 : Operand<i32>;
+def disp12imm64 : Operand<i64>;
+
+// 20-bit displacement operands.
+def disp20imm32 : Operand<i32>;
+def disp20imm64 : Operand<i64>;
+
+def BDAddr32Disp12 : AddressAsmOperand<"BDAddr", "32", "12">;
+def BDAddr32Disp20 : AddressAsmOperand<"BDAddr", "32", "20">;
+def BDAddr64Disp12 : AddressAsmOperand<"BDAddr", "64", "12">;
+def BDAddr64Disp20 : AddressAsmOperand<"BDAddr", "64", "20">;
+def BDXAddr64Disp12 : AddressAsmOperand<"BDXAddr", "64", "12">;
+def BDXAddr64Disp20 : AddressAsmOperand<"BDXAddr", "64", "20">;
+def BDLAddr64Disp12Len8 : AddressAsmOperand<"BDLAddr", "64", "12", "Len8">;
+
+// DAG patterns and operands for addressing modes. Each mode has
+// the form <type><range><group>[<len>] where:
+//
+// <type> is one of:
+// shift : base + displacement (32-bit)
+// bdaddr : base + displacement
+// mviaddr : like bdaddr, but reject cases with a natural index
+// bdxaddr : base + displacement + index
+// laaddr : like bdxaddr, but used for Load Address operations
+// dynalloc : base + displacement + index + ADJDYNALLOC
+// bdladdr : base + displacement with a length field
+//
+// <range> is one of:
+// 12 : the displacement is an unsigned 12-bit value
+// 20 : the displacement is a signed 20-bit value
+//
+// <group> is one of:
+// pair : used when there is an equivalent instruction with the opposite
+// range value (12 or 20)
+// only : used when there is no equivalent instruction with the opposite
+// range value
+//
+// <len> is one of:
+//
+// <empty> : there is no length field
+// len8 : the length field is 8 bits, with a range of [1, 0x100].
+def shift12only : BDMode <"BDAddr", "32", "12", "Only">;
+def shift20only : BDMode <"BDAddr", "32", "20", "Only">;
+def bdaddr12only : BDMode <"BDAddr", "64", "12", "Only">;
+def bdaddr12pair : BDMode <"BDAddr", "64", "12", "Pair">;
+def bdaddr20only : BDMode <"BDAddr", "64", "20", "Only">;
+def bdaddr20pair : BDMode <"BDAddr", "64", "20", "Pair">;
+def mviaddr12pair : BDMode <"MVIAddr", "64", "12", "Pair">;
+def mviaddr20pair : BDMode <"MVIAddr", "64", "20", "Pair">;
+def bdxaddr12only : BDXMode<"BDXAddr", "64", "12", "Only">;
+def bdxaddr12pair : BDXMode<"BDXAddr", "64", "12", "Pair">;
+def bdxaddr20only : BDXMode<"BDXAddr", "64", "20", "Only">;
+def bdxaddr20only128 : BDXMode<"BDXAddr", "64", "20", "Only128">;
+def bdxaddr20pair : BDXMode<"BDXAddr", "64", "20", "Pair">;
+def dynalloc12only : BDXMode<"DynAlloc", "64", "12", "Only">;
+def laaddr12pair : BDXMode<"LAAddr", "64", "12", "Pair">;
+def laaddr20pair : BDXMode<"LAAddr", "64", "20", "Pair">;
+def bdladdr12onlylen8 : BDLMode<"BDLAddr", "64", "12", "Only", "8">;
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous
+//===----------------------------------------------------------------------===//
+
+// Access registers. At present we just use them for accessing the thread
+// pointer, so we don't expose them as registers to LLVM.
+def AccessReg : AsmOperandClass {
+ let Name = "AccessReg";
+ let ParserMethod = "parseAccessReg";
+}
+def access_reg : Immediate<i8, [{ return N->getZExtValue() < 16; }],
+ NOOP_SDNodeXForm, "AccessReg"> {
+ let ParserMatchClass = AccessReg;
+}
+
+// A 4-bit condition-code mask.
+def cond4 : PatLeaf<(i8 imm), [{ return (N->getZExtValue() < 16); }]>,
+ Operand<i8> {
+ let PrintMethod = "printCond4Operand";
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
new file mode 100644
index 000000000000..31cabaa3413f
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
@@ -0,0 +1,378 @@
+//===-- SystemZOperators.td - SystemZ-specific operators ------*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Type profiles
+//===----------------------------------------------------------------------===//
+def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i64>]>;
+def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>,
+ SDTCisVT<1, i64>]>;
+def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
+def SDT_ZCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
+def SDT_ZICmp : SDTypeProfile<0, 3,
+ [SDTCisSameAs<0, 1>,
+ SDTCisVT<2, i32>]>;
+def SDT_ZBRCCMask : SDTypeProfile<0, 3,
+ [SDTCisVT<0, i8>,
+ SDTCisVT<1, i8>,
+ SDTCisVT<2, OtherVT>]>;
+def SDT_ZSelectCCMask : SDTypeProfile<1, 4,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<1, 2>,
+ SDTCisVT<3, i8>,
+ SDTCisVT<4, i8>]>;
+def SDT_ZWrapPtr : SDTypeProfile<1, 1,
+ [SDTCisSameAs<0, 1>,
+ SDTCisPtrTy<0>]>;
+def SDT_ZWrapOffset : SDTypeProfile<1, 2,
+ [SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>,
+ SDTCisPtrTy<0>]>;
+def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
+def SDT_ZExtractAccess : SDTypeProfile<1, 1,
+ [SDTCisVT<0, i32>,
+ SDTCisVT<1, i8>]>;
+def SDT_ZGR128Binary32 : SDTypeProfile<1, 2,
+ [SDTCisVT<0, untyped>,
+ SDTCisVT<1, untyped>,
+ SDTCisVT<2, i32>]>;
+def SDT_ZGR128Binary64 : SDTypeProfile<1, 2,
+ [SDTCisVT<0, untyped>,
+ SDTCisVT<1, untyped>,
+ SDTCisVT<2, i64>]>;
+def SDT_ZAtomicLoadBinaryW : SDTypeProfile<1, 5,
+ [SDTCisVT<0, i32>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i32>,
+ SDTCisVT<3, i32>,
+ SDTCisVT<4, i32>,
+ SDTCisVT<5, i32>]>;
+def SDT_ZAtomicCmpSwapW : SDTypeProfile<1, 6,
+ [SDTCisVT<0, i32>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i32>,
+ SDTCisVT<3, i32>,
+ SDTCisVT<4, i32>,
+ SDTCisVT<5, i32>,
+ SDTCisVT<6, i32>]>;
+def SDT_ZMemMemLength : SDTypeProfile<0, 3,
+ [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i64>]>;
+def SDT_ZMemMemLoop : SDTypeProfile<0, 4,
+ [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisVT<2, i64>,
+ SDTCisVT<3, i64>]>;
+def SDT_ZString : SDTypeProfile<1, 3,
+ [SDTCisPtrTy<0>,
+ SDTCisPtrTy<1>,
+ SDTCisPtrTy<2>,
+ SDTCisVT<3, i32>]>;
+def SDT_ZI32Intrinsic : SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>;
+def SDT_ZPrefetch : SDTypeProfile<0, 2,
+ [SDTCisVT<0, i8>,
+ SDTCisPtrTy<1>]>;
+
+//===----------------------------------------------------------------------===//
+// Node definitions
+//===----------------------------------------------------------------------===//
+
+// These are target-independent nodes, but have target-specific formats.
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
+ [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
+ [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue,
+ SDNPOutGlue]>;
+
+// Nodes for SystemZISD::*. See SystemZISelLowering.h for more details.
+def z_retflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def z_call : SDNode<"SystemZISD::CALL", SDT_ZCall,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
+ SDNPVariadic]>;
+def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
+ [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
+ SDNPVariadic]>;
+def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
+def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
+ SDT_ZWrapOffset, []>;
+def z_icmp : SDNode<"SystemZISD::ICMP", SDT_ZICmp, [SDNPOutGlue]>;
+def z_fcmp : SDNode<"SystemZISD::FCMP", SDT_ZCmp, [SDNPOutGlue]>;
+def z_tm : SDNode<"SystemZISD::TM", SDT_ZICmp, [SDNPOutGlue]>;
+def z_br_ccmask : SDNode<"SystemZISD::BR_CCMASK", SDT_ZBRCCMask,
+ [SDNPHasChain, SDNPInGlue]>;
+def z_select_ccmask : SDNode<"SystemZISD::SELECT_CCMASK", SDT_ZSelectCCMask,
+ [SDNPInGlue]>;
+def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>;
+def z_extract_access : SDNode<"SystemZISD::EXTRACT_ACCESS",
+ SDT_ZExtractAccess>;
+def z_umul_lohi64 : SDNode<"SystemZISD::UMUL_LOHI64", SDT_ZGR128Binary64>;
+def z_sdivrem32 : SDNode<"SystemZISD::SDIVREM32", SDT_ZGR128Binary32>;
+def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>;
+def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>;
+def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
+
+class AtomicWOp<string name, SDTypeProfile profile = SDT_ZAtomicLoadBinaryW>
+ : SDNode<"SystemZISD::"##name, profile,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+
+def z_atomic_swapw : AtomicWOp<"ATOMIC_SWAPW">;
+def z_atomic_loadw_add : AtomicWOp<"ATOMIC_LOADW_ADD">;
+def z_atomic_loadw_sub : AtomicWOp<"ATOMIC_LOADW_SUB">;
+def z_atomic_loadw_and : AtomicWOp<"ATOMIC_LOADW_AND">;
+def z_atomic_loadw_or : AtomicWOp<"ATOMIC_LOADW_OR">;
+def z_atomic_loadw_xor : AtomicWOp<"ATOMIC_LOADW_XOR">;
+def z_atomic_loadw_nand : AtomicWOp<"ATOMIC_LOADW_NAND">;
+def z_atomic_loadw_min : AtomicWOp<"ATOMIC_LOADW_MIN">;
+def z_atomic_loadw_max : AtomicWOp<"ATOMIC_LOADW_MAX">;
+def z_atomic_loadw_umin : AtomicWOp<"ATOMIC_LOADW_UMIN">;
+def z_atomic_loadw_umax : AtomicWOp<"ATOMIC_LOADW_UMAX">;
+def z_atomic_cmp_swapw : AtomicWOp<"ATOMIC_CMP_SWAPW", SDT_ZAtomicCmpSwapW>;
+
+def z_mvc : SDNode<"SystemZISD::MVC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_mvc_loop : SDNode<"SystemZISD::MVC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_nc : SDNode<"SystemZISD::NC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_nc_loop : SDNode<"SystemZISD::NC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_oc : SDNode<"SystemZISD::OC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_oc_loop : SDNode<"SystemZISD::OC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_xc : SDNode<"SystemZISD::XC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_xc_loop : SDNode<"SystemZISD::XC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_clc : SDNode<"SystemZISD::CLC", SDT_ZMemMemLength,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_clc_loop : SDNode<"SystemZISD::CLC_LOOP", SDT_ZMemMemLoop,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_strcmp : SDNode<"SystemZISD::STRCMP", SDT_ZString,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_stpcpy : SDNode<"SystemZISD::STPCPY", SDT_ZString,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
+def z_search_string : SDNode<"SystemZISD::SEARCH_STRING", SDT_ZString,
+ [SDNPHasChain, SDNPOutGlue, SDNPMayLoad]>;
+def z_ipm : SDNode<"SystemZISD::IPM", SDT_ZI32Intrinsic,
+ [SDNPInGlue]>;
+def z_prefetch : SDNode<"SystemZISD::PREFETCH", SDT_ZPrefetch,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+ SDNPMemOperand]>;
+
+//===----------------------------------------------------------------------===//
+// Pattern fragments
+//===----------------------------------------------------------------------===//
+
+// Signed and unsigned comparisons.
+def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
+ unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ return Type != SystemZICMP::UnsignedOnly;
+}]>;
+def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, imm), [{
+ unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
+ return Type != SystemZICMP::SignedOnly;
+}]>;
+
+// Register- and memory-based TEST UNDER MASK.
+def z_tm_reg : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, imm)>;
+def z_tm_mem : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, 0)>;
+
+// Register sign-extend operations. Sub-32-bit values are represented as i32s.
+def sext8 : PatFrag<(ops node:$src), (sext_inreg node:$src, i8)>;
+def sext16 : PatFrag<(ops node:$src), (sext_inreg node:$src, i16)>;
+def sext32 : PatFrag<(ops node:$src), (sext (i32 node:$src))>;
+
+// Register zero-extend operations. Sub-32-bit values are represented as i32s.
+def zext8 : PatFrag<(ops node:$src), (and node:$src, 0xff)>;
+def zext16 : PatFrag<(ops node:$src), (and node:$src, 0xffff)>;
+def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>;
+
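The AND forms above are how sub-32-bit zero-extensions appear once i8 and i16 values are carried in 32-bit registers; a trivial standalone check:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t X = 0xFFFFFFFFu;                      // an i8/i16 held in i32
      std::printf("%u %u\n", X & 0xff, X & 0xffff);  // prints 255 65535
      return 0;
    }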
+// Typed floating-point loads.
+def loadf32 : PatFrag<(ops node:$src), (f32 (load node:$src))>;
+def loadf64 : PatFrag<(ops node:$src), (f64 (load node:$src))>;
+
+// Extending loads in which the extension type can be signed.
+def asextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
+ return Type == ISD::EXTLOAD || Type == ISD::SEXTLOAD;
+}]>;
+def asextloadi8 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def asextloadi16 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def asextloadi32 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+// Extending loads in which the extension type can be unsigned.
+def azextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
+ return Type == ISD::EXTLOAD || Type == ISD::ZEXTLOAD;
+}]>;
+def azextloadi8 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def azextloadi16 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def azextloadi32 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+// Extending loads in which the extension type doesn't matter.
+def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;
+}]>;
+def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+// Aligned loads.
+class AlignedLoad<SDPatternOperator load>
+ : PatFrag<(ops node:$addr), (load node:$addr), [{
+ LoadSDNode *Load = cast<LoadSDNode>(N);
+ return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
+}]>;
+def aligned_load : AlignedLoad<load>;
+def aligned_asextloadi16 : AlignedLoad<asextloadi16>;
+def aligned_asextloadi32 : AlignedLoad<asextloadi32>;
+def aligned_azextloadi16 : AlignedLoad<azextloadi16>;
+def aligned_azextloadi32 : AlignedLoad<azextloadi32>;
+
+// Aligned stores.
+class AlignedStore<SDPatternOperator store>
+ : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
+ StoreSDNode *Store = cast<StoreSDNode>(N);
+ return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
+}]>;
+def aligned_store : AlignedStore<store>;
+def aligned_truncstorei16 : AlignedStore<truncstorei16>;
+def aligned_truncstorei32 : AlignedStore<truncstorei32>;
+
+// Non-volatile loads. Used for instructions that might access the storage
+// location multiple times.
+class NonvolatileLoad<SDPatternOperator load>
+ : PatFrag<(ops node:$addr), (load node:$addr), [{
+ LoadSDNode *Load = cast<LoadSDNode>(N);
+ return !Load->isVolatile();
+}]>;
+def nonvolatile_load : NonvolatileLoad<load>;
+def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>;
+def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>;
+def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
+
+// Non-volatile stores.
+class NonvolatileStore<SDPatternOperator store>
+ : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
+ StoreSDNode *Store = cast<StoreSDNode>(N);
+ return !Store->isVolatile();
+}]>;
+def nonvolatile_store : NonvolatileStore<store>;
+def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>;
+def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>;
+def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>;
+
+// A store of a load that can be implemented using MVC.
+def mvc_store : PatFrag<(ops node:$value, node:$addr),
+ (unindexedstore node:$value, node:$addr),
+ [{ return storeLoadCanUseMVC(N); }]>;
+
+// Binary read-modify-write operations on memory in which the other
+// operand is also memory and for which block operations like NC can
+// be used. There are two patterns for each operator, depending on
+// which operand contains the "other" load.
+multiclass block_op<SDPatternOperator operator> {
+ def "1" : PatFrag<(ops node:$value, node:$addr),
+ (unindexedstore (operator node:$value,
+ (unindexedload node:$addr)),
+ node:$addr),
+ [{ return storeLoadCanUseBlockBinary(N, 0); }]>;
+ def "2" : PatFrag<(ops node:$value, node:$addr),
+ (unindexedstore (operator (unindexedload node:$addr),
+ node:$value),
+ node:$addr),
+ [{ return storeLoadCanUseBlockBinary(N, 1); }]>;
+}
+defm block_and : block_op<and>;
+defm block_or : block_op<or>;
+defm block_xor : block_op<xor>;
+
+// Insertions.
+def inserti8 : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, -256), node:$src2)>;
+def insertll : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, 0xffffffffffff0000), node:$src2)>;
+def insertlh : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, 0xffffffff0000ffff), node:$src2)>;
+def inserthl : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, 0xffff0000ffffffff), node:$src2)>;
+def inserthh : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, 0x0000ffffffffffff), node:$src2)>;
+def insertlf : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, 0xffffffff00000000), node:$src2)>;
+def inserthf : PatFrag<(ops node:$src1, node:$src2),
+ (or (and node:$src1, 0x00000000ffffffff), node:$src2)>;
+
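Each fragment above matches an AND that clears one field followed by an OR that fills it, which is what SystemZ's insert-immediate instructions (IILL, IIHH and friends) do. A standalone illustration for insertll (the helper name is made up):

    #include <cstdint>
    #include <cstdio>

    // Replace bits 0-15 of Reg, assuming Imm16 occupies only those bits.
    static uint64_t insertLL(uint64_t Reg, uint64_t Imm16) {
      return (Reg & 0xffffffffffff0000ULL) | Imm16;
    }

    int main() {
      std::printf("%llx\n", (unsigned long long)
                  insertLL(0x1111222233334444ULL, 0xabcd));
      // prints 111122223333abcd
      return 0;
    }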
+// ORs that can be treated as insertions.
+def or_as_inserti8 : PatFrag<(ops node:$src1, node:$src2),
+ (or node:$src1, node:$src2), [{
+ unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+ return CurDAG->MaskedValueIsZero(N->getOperand(0),
+ APInt::getLowBitsSet(BitWidth, 8));
+}]>;
+
+// ORs that can be treated as reversed insertions.
+def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
+ (or node:$src1, node:$src2), [{
+ unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+ return CurDAG->MaskedValueIsZero(N->getOperand(1),
+ APInt::getLowBitsSet(BitWidth, 8));
+}]>;
+
+// Integer absolute, matching the canonical form generated by DAGCombiner.
+def z_iabs32 : PatFrag<(ops node:$src),
+ (xor (add node:$src, (sra node:$src, (i32 31))),
+ (sra node:$src, (i32 31)))>;
+def z_iabs64 : PatFrag<(ops node:$src),
+ (xor (add node:$src, (sra node:$src, (i32 63))),
+ (sra node:$src, (i32 63)))>;
+def z_inegabs32 : PatFrag<(ops node:$src), (ineg (z_iabs32 node:$src))>;
+def z_inegabs64 : PatFrag<(ops node:$src), (ineg (z_iabs64 node:$src))>;
+
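The canonical form matched here is the shift-add-xor absolute-value idiom: for negative x the arithmetic shift produces all-ones, so (x + -1) ^ -1 equals ~(x - 1), which is -x. A standalone check, assuming arithmetic right shift for signed types as on SystemZ:

    #include <cstdint>
    #include <cstdio>

    static int32_t iabs32(int32_t X) {
      int32_t Sign = X >> 31;      // 0 for non-negative, -1 for negative
      return (X + Sign) ^ Sign;
    }

    int main() {
      std::printf("%d %d %d\n", iabs32(42), iabs32(-42), iabs32(0)); // 42 42 0
      return 0;
    }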
+// Fused multiply-add and multiply-subtract, but with the order of the
+// operands matching SystemZ's MA and MS instructions.
+def z_fma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (fma node:$src2, node:$src3, node:$src1)>;
+def z_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+ (fma node:$src2, node:$src3, (fneg node:$src1))>;
+
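LLVM's fma node computes src1 * src2 + src3, while the fragments above put the addend first to match the accumulator-first operand order of the MA(D)B/MS(D)B-style instructions. The equivalent in plain C++:

    #include <cmath>
    #include <cstdio>

    int main() {
      double Acc = 1.0, A = 2.0, B = 3.0;
      // z_fma (Acc, A, B) corresponds to fma(A, B, Acc).
      std::printf("%f\n", std::fma(A, B, Acc));   // 7.0
      // z_fms (Acc, A, B) corresponds to fma(A, B, -Acc).
      std::printf("%f\n", std::fma(A, B, -Acc));  // 5.0
      return 0;
    }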
+// Floating-point negative absolute.
+def fnabs : PatFrag<(ops node:$ptr), (fneg (fabs node:$ptr))>;
+
+// Create a unary operator that loads from memory and then performs
+// the given operation on it.
+class loadu<SDPatternOperator operator, SDPatternOperator load = load>
+ : PatFrag<(ops node:$addr), (operator (load node:$addr))>;
+
+// Create a store operator that performs the given unary operation
+// on the value before storing it.
+class storeu<SDPatternOperator operator, SDPatternOperator store = store>
+ : PatFrag<(ops node:$value, node:$addr),
+ (store (operator node:$value), node:$addr)>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
new file mode 100644
index 000000000000..7706351e54b3
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
@@ -0,0 +1,152 @@
+//===-- SystemZPatterns.td - SystemZ-specific pattern rules ---*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Record that INSN performs a 64-bit version of unary operator OPERATOR
+// in which the operand is sign-extended from 32 to 64 bits.
+multiclass SXU<SDPatternOperator operator, Instruction insn> {
+ def : Pat<(operator (sext (i32 GR32:$src))),
+ (insn GR32:$src)>;
+ def : Pat<(operator (sext_inreg GR64:$src, i32)),
+ (insn (EXTRACT_SUBREG GR64:$src, subreg_l32))>;
+}
+
+// Record that INSN performs a 64-bit version of binary operator OPERATOR
+// in which the first operand has class CLS and in which the second operand
+// is sign-extended from a 32-bit register.
+multiclass SXB<SDPatternOperator operator, RegisterOperand cls,
+ Instruction insn> {
+ def : Pat<(operator cls:$src1, (sext GR32:$src2)),
+ (insn cls:$src1, GR32:$src2)>;
+ def : Pat<(operator cls:$src1, (sext_inreg GR64:$src2, i32)),
+ (insn cls:$src1, (EXTRACT_SUBREG GR64:$src2, subreg_l32))>;
+}
+
+// Like SXB, but for zero extension.
+multiclass ZXB<SDPatternOperator operator, RegisterOperand cls,
+ Instruction insn> {
+ def : Pat<(operator cls:$src1, (zext GR32:$src2)),
+ (insn cls:$src1, GR32:$src2)>;
+ def : Pat<(operator cls:$src1, (and GR64:$src2, 0xffffffff)),
+ (insn cls:$src1, (EXTRACT_SUBREG GR64:$src2, subreg_l32))>;
+}
+
+// Record that INSN performs a binary read-modify-write operation,
+// with LOAD, OPERATOR and STORE being the read, modify and write
+// respectively. MODE is the addressing mode and IMM is the type
+// of the second operand.
+class RMWI<SDPatternOperator load, SDPatternOperator operator,
+ SDPatternOperator store, AddressingMode mode,
+ PatFrag imm, Instruction insn>
+ : Pat<(store (operator (load mode:$addr), imm:$src), mode:$addr),
+ (insn mode:$addr, (UIMM8 imm:$src))>;
+
+// Record that INSN performs binary operator OPERATOR on a byte
+// memory location. IMM is the type of the second operand.
+multiclass RMWIByte<SDPatternOperator operator, AddressingMode mode,
+ Instruction insn> {
+ def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm32, insn>;
+ def : RMWI<anyextloadi8, operator, truncstorei8, mode, imm64, insn>;
+}
+
+// Record that INSN performs insertion TYPE into a register of class CLS.
+// The inserted operand is loaded using LOAD from an address of mode MODE.
+multiclass InsertMem<string type, Instruction insn, RegisterOperand cls,
+ SDPatternOperator load, AddressingMode mode> {
+ def : Pat<(!cast<SDPatternOperator>("or_as_"##type)
+ cls:$src1, (load mode:$src2)),
+ (insn cls:$src1, mode:$src2)>;
+ def : Pat<(!cast<SDPatternOperator>("or_as_rev"##type)
+ (load mode:$src2), cls:$src1),
+ (insn cls:$src1, mode:$src2)>;
+}
+
+// INSN stores the low 32 bits of a GPR to memory with addressing mode MODE.
+// Record that it is equivalent to using OPERATOR to store a GR64.
+class StoreGR64<Instruction insn, SDPatternOperator operator,
+ AddressingMode mode>
+ : Pat<(operator GR64:$R1, mode:$XBD2),
+ (insn (EXTRACT_SUBREG GR64:$R1, subreg_l32), mode:$XBD2)>;
+
+// INSN and INSNY are an RX/RXY pair of instructions that store the low
+// 32 bits of a GPR to memory. Record that they are equivalent to using
+// OPERATOR to store a GR64.
+multiclass StoreGR64Pair<Instruction insn, Instruction insny,
+ SDPatternOperator operator> {
+ def : StoreGR64<insn, operator, bdxaddr12pair>;
+ def : StoreGR64<insny, operator, bdxaddr20pair>;
+}
+
+// INSN stores the low 32 bits of a GPR using PC-relative addressing.
+// Record that it is equivalent to using OPERATOR to store a GR64.
+class StoreGR64PC<Instruction insn, SDPatternOperator operator>
+ : Pat<(operator GR64:$R1, pcrel32:$XBD2),
+ (insn (EXTRACT_SUBREG GR64:$R1, subreg_l32), pcrel32:$XBD2)> {
+ // We want PC-relative addresses to be tried ahead of BD and BDX addresses.
+ // However, BDXs have two extra operands and are therefore 6 units more
+ // complex.
+ let AddedComplexity = 7;
+}
+
+// INSN and INSNINV conditionally store the low 32 bits of a GPR to memory,
+// with INSN storing when the condition is true and INSNINV storing when the
+// condition is false. Record that they are equivalent to a LOAD/select/STORE
+// sequence for GR64s.
+multiclass CondStores64<Instruction insn, Instruction insninv,
+ SDPatternOperator store, SDPatternOperator load,
+ AddressingMode mode> {
+ def : Pat<(store (z_select_ccmask GR64:$new, (load mode:$addr),
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr),
+ (insn (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
+ uimm8zx4:$valid, uimm8zx4:$cc)>;
+ def : Pat<(store (z_select_ccmask (load mode:$addr), GR64:$new,
+ uimm8zx4:$valid, uimm8zx4:$cc),
+ mode:$addr),
+ (insninv (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
+ uimm8zx4:$valid, uimm8zx4:$cc)>;
+}
+
+// Try to use MVC instruction INSN for a load of type LOAD followed by a store
+// of the same size. VT is the type of the intermediate (legalized) value and
+// LENGTH is the number of bytes loaded by LOAD.
+multiclass MVCLoadStore<SDPatternOperator load, ValueType vt, Instruction insn,
+ bits<5> length> {
+ def : Pat<(mvc_store (vt (load bdaddr12only:$src)), bdaddr12only:$dest),
+ (insn bdaddr12only:$dest, bdaddr12only:$src, length)>;
+}
+
+// Use NC-like instruction INSN for block_op operation OPERATOR.
+// The other operand is a load of type LOAD, which accesses LENGTH bytes.
+// VT is the intermediate legalized type in which the binary operation
+// is actually done.
+multiclass BinaryLoadStore<SDPatternOperator operator, SDPatternOperator load,
+ ValueType vt, Instruction insn, bits<5> length> {
+ def : Pat<(operator (vt (load bdaddr12only:$src)), bdaddr12only:$dest),
+ (insn bdaddr12only:$dest, bdaddr12only:$src, length)>;
+}
+
+// A convenient way of generating all block peepholes for a particular
+// LOAD/VT/LENGTH combination.
+multiclass BlockLoadStore<SDPatternOperator load, ValueType vt,
+ Instruction mvc, Instruction nc, Instruction oc,
+ Instruction xc, bits<5> length> {
+ defm : MVCLoadStore<load, vt, mvc, length>;
+ defm : BinaryLoadStore<block_and1, load, vt, nc, length>;
+ defm : BinaryLoadStore<block_and2, load, vt, nc, length>;
+ defm : BinaryLoadStore<block_or1, load, vt, oc, length>;
+ defm : BinaryLoadStore<block_or2, load, vt, oc, length>;
+ defm : BinaryLoadStore<block_xor1, load, vt, xc, length>;
+ defm : BinaryLoadStore<block_xor2, load, vt, xc, length>;
+}
+
+// Record that INSN is a LOAD AND TEST that can be used to compare
+// registers in CLS against zero. The instruction has separate R1 and R2
+// operands, but they must be the same when the instruction is used like this.
+class CompareZeroFP<Instruction insn, RegisterOperand cls>
+ : Pat<(z_fcmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>;
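+
+// For illustration (a hypothetical use; the actual instantiations live
+// with the floating-point instruction definitions):
+//   def : CompareZeroFP<LTEBR, FP32>;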
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
new file mode 100644
index 000000000000..f241fb0c2222
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
@@ -0,0 +1,46 @@
+//===-- SystemZProcessors.td - SystemZ processors and features -*- tblgen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Processor and feature definitions.
+//
+//===----------------------------------------------------------------------===//
+
+class SystemZFeature<string extname, string intname, string desc>
+ : Predicate<"Subtarget.has"##intname##"()">,
+ AssemblerPredicate<"Feature"##intname, extname>,
+ SubtargetFeature<extname, "Has"##intname, "true", desc>;
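+
+// Each feature therefore plays three roles: a Predicate that guards
+// instruction-selection patterns, an AssemblerPredicate that gates the
+// corresponding mnemonics in the assembler, and a SubtargetFeature that
+// can be toggled externally (e.g. -mattr=+distinct-ops, shown here purely
+// as an illustration).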
+
+def FeatureDistinctOps : SystemZFeature<
+ "distinct-ops", "DistinctOps",
+ "Assume that the distinct-operands facility is installed"
+>;
+
+def FeatureLoadStoreOnCond : SystemZFeature<
+ "load-store-on-cond", "LoadStoreOnCond",
+ "Assume that the load/store-on-condition facility is installed"
+>;
+
+def FeatureHighWord : SystemZFeature<
+ "high-word", "HighWord",
+ "Assume that the high-word facility is installed"
+>;
+
+def FeatureFPExtension : SystemZFeature<
+ "fp-extension", "FPExtension",
+ "Assume that the floating-point extension facility is installed"
+>;
+
+def : Processor<"generic", NoItineraries, []>;
+def : Processor<"z10", NoItineraries, []>;
+def : Processor<"z196", NoItineraries,
+ [FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
+ FeatureFPExtension]>;
+def : Processor<"zEC12", NoItineraries,
+ [FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
+ FeatureFPExtension]>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
new file mode 100644
index 000000000000..b61ae88f733c
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -0,0 +1,141 @@
+//===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZRegisterInfo.h"
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "SystemZGenRegisterInfo.inc"
+
+using namespace llvm;
+
+SystemZRegisterInfo::SystemZRegisterInfo(SystemZTargetMachine &tm)
+ : SystemZGenRegisterInfo(SystemZ::R14D), TM(tm) {}
+
+const uint16_t*
+SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ static const uint16_t CalleeSavedRegs[] = {
+ SystemZ::R6D, SystemZ::R7D, SystemZ::R8D, SystemZ::R9D,
+ SystemZ::R10D, SystemZ::R11D, SystemZ::R12D, SystemZ::R13D,
+ SystemZ::R14D, SystemZ::R15D,
+ SystemZ::F8D, SystemZ::F9D, SystemZ::F10D, SystemZ::F11D,
+ SystemZ::F12D, SystemZ::F13D, SystemZ::F14D, SystemZ::F15D,
+ 0
+ };
+
+ return CalleeSavedRegs;
+}
+
+BitVector
+SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+
+ if (TFI->hasFP(MF)) {
+ // R11D is the frame pointer. Reserve all aliases.
+ Reserved.set(SystemZ::R11D);
+ Reserved.set(SystemZ::R11L);
+ Reserved.set(SystemZ::R11H);
+ Reserved.set(SystemZ::R10Q);
+ }
+
+ // R15D is the stack pointer. Reserve all aliases.
+ Reserved.set(SystemZ::R15D);
+ Reserved.set(SystemZ::R15L);
+ Reserved.set(SystemZ::R15H);
+ Reserved.set(SystemZ::R14Q);
+ return Reserved;
+}
+
+void
+SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");
+
+ MachineBasicBlock &MBB = *MI->getParent();
+ MachineFunction &MF = *MBB.getParent();
+ const SystemZInstrInfo &TII =
+ *static_cast<const SystemZInstrInfo*>(TM.getInstrInfo());
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ DebugLoc DL = MI->getDebugLoc();
+
+ // Decompose the frame index into a base and offset.
+ int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
+ unsigned BasePtr = getFrameRegister(MF);
+ int64_t Offset = (TFI->getFrameIndexOffset(MF, FrameIndex) +
+ MI->getOperand(FIOperandNum + 1).getImm());
+
+ // Special handling of dbg_value instructions.
+ if (MI->isDebugValue()) {
+ MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
+ MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ return;
+ }
+
+ // See if the offset is in range, or if an equivalent instruction that
+ // accepts the offset exists.
+ unsigned Opcode = MI->getOpcode();
+ unsigned OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+ if (OpcodeForOffset)
+ MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
+ else {
+ // Create an anchor point that is in range. Start at 0xffff so that
+ // we can use LLILH to load the immediate.
+ int64_t OldOffset = Offset;
+ int64_t Mask = 0xffff;
+ do {
+ Offset = OldOffset & Mask;
+ OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+ Mask >>= 1;
+ assert(Mask && "One offset must be OK");
+ } while (!OpcodeForOffset);
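+ // Worked example with hypothetical numbers: if OldOffset is 0x12345
+ // and the instruction has a long-displacement form, the loop above
+ // accepts Offset = 0x12345 & 0xffff = 0x2345, leaving
+ // HighOffset = 0x10000 to be materialized below.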
+
+ unsigned ScratchReg =
+ MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ int64_t HighOffset = OldOffset - Offset;
+
+ if (MI->getDesc().TSFlags & SystemZII::HasIndex
+ && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
+ // Load the offset into the scratch register and use it as an index.
+ // The scratch register then dies here.
+ TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
+ MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
+ MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
+ false, false, true);
+ } else {
+ // Load the anchor address into a scratch register.
+ unsigned LAOpcode = TII.getOpcodeForOffset(SystemZ::LA, HighOffset);
+ if (LAOpcode)
+ BuildMI(MBB, MI, DL, TII.get(LAOpcode), ScratchReg)
+ .addReg(BasePtr).addImm(HighOffset).addReg(0);
+ else {
+ // Load the high offset into the scratch register and use it as
+ // an index.
+ TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
+ BuildMI(MBB, MI, DL, TII.get(SystemZ::AGR), ScratchReg)
+ .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
+ }
+
+ // Use the scratch register as the base. It then dies here.
+ MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
+ false, false, true);
+ }
+ }
+ MI->setDesc(TII.get(OpcodeForOffset));
+ MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+}
+
+unsigned
+SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
+ return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
new file mode 100644
index 000000000000..13f45faba076
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -0,0 +1,68 @@
+//===-- SystemZRegisterInfo.h - SystemZ register information ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZREGISTERINFO_H
+#define SYSTEMZREGISTERINFO_H
+
+#include "SystemZ.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "SystemZGenRegisterInfo.inc"
+
+namespace llvm {
+
+namespace SystemZ {
+ // Return the subreg to use for referring to the even and odd registers
+ // in a GR128 pair. Is32Bit says whether we want a GR32 or GR64.
+ inline unsigned even128(bool Is32bit) {
+ return Is32bit ? subreg_hl32 : subreg_h64;
+ }
+ inline unsigned odd128(bool Is32bit) {
+ return Is32bit ? subreg_l32 : subreg_l64;
+ }
+}
+
+class SystemZSubtarget;
+class SystemZInstrInfo;
+
+struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
+private:
+ SystemZTargetMachine &TM;
+
+public:
+ SystemZRegisterInfo(SystemZTargetMachine &tm);
+
+ // Override TargetRegisterInfo.h.
+ virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
+ LLVM_OVERRIDE {
+ return true;
+ }
+ virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const
+ LLVM_OVERRIDE {
+ return true;
+ }
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const
+ LLVM_OVERRIDE {
+ return true;
+ }
+ virtual const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0)
+ const LLVM_OVERRIDE;
+ virtual BitVector getReservedRegs(const MachineFunction &MF)
+ const LLVM_OVERRIDE;
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const LLVM_OVERRIDE;
+ virtual unsigned getFrameRegister(const MachineFunction &MF) const
+ LLVM_OVERRIDE;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
new file mode 100644
index 000000000000..93d7c8375b3d
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
@@ -0,0 +1,167 @@
+//==- SystemZRegisterInfo.td - SystemZ register definitions -*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Class definitions.
+//===----------------------------------------------------------------------===//
+
+class SystemZReg<string n> : Register<n> {
+ let Namespace = "SystemZ";
+}
+
+class SystemZRegWithSubregs<string n, list<Register> subregs>
+ : RegisterWithSubRegs<n, subregs> {
+ let Namespace = "SystemZ";
+}
+
+let Namespace = "SystemZ" in {
+def subreg_l32 : SubRegIndex<32, 0>; // Also acts as subreg_ll32.
+def subreg_h32 : SubRegIndex<32, 32>; // Also acts as subreg_lh32.
+def subreg_l64 : SubRegIndex<64, 0>;
+def subreg_h64 : SubRegIndex<64, 64>;
+def subreg_hh32 : ComposedSubRegIndex<subreg_h64, subreg_h32>;
+def subreg_hl32 : ComposedSubRegIndex<subreg_h64, subreg_l32>;
+}
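+
+// To illustrate the resulting layout of a 128-bit register pair:
+// subreg_h64 (bits 64-127) is the even register and subreg_l64
+// (bits 0-63) the odd one; within each 64-bit half, subreg_h32 and
+// subreg_l32 name the high and low words, and subreg_hh32/subreg_hl32
+// pick out the two words of the even register directly.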
+
+// Define a register class that contains values of type TYPE and an
+// associated operand called NAME. SIZE is the size and alignment
+// of the registers and REGLIST is the list of individual registers.
+multiclass SystemZRegClass<string name, ValueType type, int size, dag regList> {
+ def AsmOperand : AsmOperandClass {
+ let Name = name;
+ let ParserMethod = "parse"##name;
+ let RenderMethod = "addRegOperands";
+ }
+ def Bit : RegisterClass<"SystemZ", [type], size, regList> {
+ let Size = size;
+ }
+ def "" : RegisterOperand<!cast<RegisterClass>(name##"Bit")> {
+ let ParserMatchClass = !cast<AsmOperandClass>(name##"AsmOperand");
+ }
+}
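+
+// So a "defm GR32" below creates GR32AsmOperand (for the asm parser),
+// GR32Bit (the register class itself) and GR32 (the RegisterOperand that
+// ties the two together and is what the instruction definitions actually
+// reference).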
+
+//===----------------------------------------------------------------------===//
+// General-purpose registers
+//===----------------------------------------------------------------------===//
+
+// One 32-bit half (low or high word) of a 64-bit general-purpose register.
+class GPR32<bits<16> num, string n> : SystemZReg<n> {
+ let HWEncoding = num;
+}
+
+// One of the 16 64-bit general-purpose registers.
+class GPR64<bits<16> num, string n, GPR32 low, GPR32 high>
+ : SystemZRegWithSubregs<n, [low, high]> {
+ let HWEncoding = num;
+ let SubRegIndices = [subreg_l32, subreg_h32];
+}
+
+// 8 even-odd pairs of GPR64s.
+class GPR128<bits<16> num, string n, GPR64 low, GPR64 high>
+ : SystemZRegWithSubregs<n, [low, high]> {
+ let HWEncoding = num;
+ let SubRegIndices = [subreg_l64, subreg_h64];
+}
+
+// General-purpose registers
+foreach I = 0-15 in {
+ def R#I#L : GPR32<I, "r"#I>;
+ def R#I#H : GPR32<I, "r"#I>;
+ def R#I#D : GPR64<I, "r"#I, !cast<GPR32>("R"#I#"L"), !cast<GPR32>("R"#I#"H")>,
+ DwarfRegNum<[I]>;
+}
+
+foreach I = [0, 2, 4, 6, 8, 10, 12, 14] in {
+ def R#I#Q : GPR128<I, "r"#I, !cast<GPR64>("R"#!add(I, 1)#"D"),
+ !cast<GPR64>("R"#I#"D")>;
+}
+
+/// Allocate the callee-saved R6-R13 backwards. That way they can be saved
+/// together with R14 and R15 in one prolog instruction.
+defm GR32 : SystemZRegClass<"GR32", i32, 32, (add (sequence "R%uL", 0, 5),
+ (sequence "R%uL", 15, 6))>;
+defm GRH32 : SystemZRegClass<"GRH32", i32, 32, (add (sequence "R%uH", 0, 5),
+ (sequence "R%uH", 15, 6))>;
+defm GR64 : SystemZRegClass<"GR64", i64, 64, (add (sequence "R%uD", 0, 5),
+ (sequence "R%uD", 15, 6))>;
+
+// Combine the low and high GR32s into a single class. This can only be
+// used for virtual registers if the high-word facility is available.
+defm GRX32 : SystemZRegClass<"GRX32", i32, 32,
+ (add (sequence "R%uL", 0, 5),
+ (sequence "R%uH", 0, 5),
+ R15L, R15H, R14L, R14H, R13L, R13H,
+ R12L, R12H, R11L, R11H, R10L, R10H,
+ R9L, R9H, R8L, R8H, R7L, R7H, R6L, R6H)>;
+
+// The architecture doesn't really have any i128 support, so model the
+// register pairs as untyped instead.
+defm GR128 : SystemZRegClass<"GR128", untyped, 128, (add R0Q, R2Q, R4Q,
+ R12Q, R10Q, R8Q, R6Q,
+ R14Q)>;
+
+// Base and index registers. Everything except R0, which in an address
+// context evaluates as 0.
+defm ADDR32 : SystemZRegClass<"ADDR32", i32, 32, (sub GR32Bit, R0L)>;
+defm ADDR64 : SystemZRegClass<"ADDR64", i64, 64, (sub GR64Bit, R0D)>;
+
+// Not used directly, but needs to exist for ADDR32 and ADDR64 subregs
+// of a GR128.
+defm ADDR128 : SystemZRegClass<"ADDR128", untyped, 128, (sub GR128Bit, R0Q)>;
+
+//===----------------------------------------------------------------------===//
+// Floating-point registers
+//===----------------------------------------------------------------------===//
+
+// Lower 32 bits of one of the 16 64-bit floating-point registers.
+class FPR32<bits<16> num, string n> : SystemZReg<n> {
+ let HWEncoding = num;
+}
+
+// One of the 16 64-bit floating-point registers.
+class FPR64<bits<16> num, string n, FPR32 low>
+ : SystemZRegWithSubregs<n, [low]> {
+ let HWEncoding = num;
+ let SubRegIndices = [subreg_h32];
+}
+
+// 8 pairs of FPR64s, with a one-register gap in between.
+class FPR128<bits<16> num, string n, FPR64 low, FPR64 high>
+ : SystemZRegWithSubregs<n, [low, high]> {
+ let HWEncoding = num;
+ let SubRegIndices = [subreg_l64, subreg_h64];
+}
+
+// Floating-point registers
+foreach I = 0-15 in {
+ def F#I#S : FPR32<I, "f"#I>;
+ def F#I#D : FPR64<I, "f"#I, !cast<FPR32>("F"#I#"S")>,
+ DwarfRegNum<[!add(I, 16)]>;
+}
+
+foreach I = [0, 1, 4, 5, 8, 9, 12, 13] in {
+ def F#I#Q : FPR128<I, "f"#I, !cast<FPR64>("F"#!add(I, 2)#"D"),
+ !cast<FPR64>("F"#I#"D")>;
+}
+
+// There's no store-multiple instruction for FPRs, so we're not fussy
+// about the order in which call-saved registers are allocated.
+defm FP32 : SystemZRegClass<"FP32", f32, 32, (sequence "F%uS", 0, 15)>;
+defm FP64 : SystemZRegClass<"FP64", f64, 64, (sequence "F%uD", 0, 15)>;
+defm FP128 : SystemZRegClass<"FP128", f128, 128, (add F0Q, F1Q, F4Q, F5Q,
+ F8Q, F9Q, F12Q, F13Q)>;
+
+//===----------------------------------------------------------------------===//
+// Other registers
+//===----------------------------------------------------------------------===//
+
+// The 2-bit condition code field of the PSW. Every register named in an
+// inline asm needs a class associated with it.
+def CC : SystemZReg<"cc">;
+def CCRegs : RegisterClass<"SystemZ", [i32], 32, (add CC)>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
new file mode 100644
index 000000000000..c7ebb5d6b4ec
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -0,0 +1,293 @@
+//===-- SystemZSelectionDAGInfo.cpp - SystemZ SelectionDAG Info -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SystemZSelectionDAGInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-selectiondag-info"
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+
+using namespace llvm;
+
+SystemZSelectionDAGInfo::
+SystemZSelectionDAGInfo(const SystemZTargetMachine &TM)
+ : TargetSelectionDAGInfo(TM) {
+}
+
+SystemZSelectionDAGInfo::~SystemZSelectionDAGInfo() {
+}
+
+// Decide whether it is best to use a loop or straight-line code for
+// a block operation of Size bytes with source address Src and destination
+// address Dest. Sequence is the opcode to use for straight-line code
+// (such as MVC) and Loop is the opcode to use for loops (such as MVC_LOOP).
+// Return the chain for the completed operation.
+static SDValue emitMemMem(SelectionDAG &DAG, SDLoc DL, unsigned Sequence,
+ unsigned Loop, SDValue Chain, SDValue Dst,
+ SDValue Src, uint64_t Size) {
+ EVT PtrVT = Src.getValueType();
+ // The heuristic we use is to prefer loops for anything that would
+ // require 7 or more MVCs. With these kinds of sizes there isn't
+ // much to choose between straight-line code and looping code,
+ // since the time will be dominated by the MVCs themselves.
+ // However, the loop has 4 or 5 instructions (depending on whether
+ // the base addresses can be proved equal), so there doesn't seem
+ // much point using a loop for 5 * 256 bytes or fewer. Anything in
+ // the range (5 * 256, 6 * 256) will need another instruction after
+ // the loop, so it doesn't seem worth using a loop then either.
+ // The next value up, 6 * 256, can be implemented in the same
+ // number of straight-line MVCs as 6 * 256 - 1.
+ if (Size > 6 * 256)
+ return DAG.getNode(Loop, DL, MVT::Other, Chain, Dst, Src,
+ DAG.getConstant(Size, PtrVT),
+ DAG.getConstant(Size / 256, PtrVT));
+ return DAG.getNode(Sequence, DL, MVT::Other, Chain, Dst, Src,
+ DAG.getConstant(Size, PtrVT));
+}
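+
+// For example, Size = 2048 exceeds the 6 * 256 = 1536 threshold and so
+// becomes a loop node with a block count of 2048 / 256 = 8, whereas
+// Size = 1536 is emitted as six straight-line MVCs.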
+
+SDValue SystemZSelectionDAGInfo::
+EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Src, SDValue Size, unsigned Align,
+ bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
+ if (IsVolatile)
+ return SDValue();
+
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size))
+ return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
+ Chain, Dst, Src, CSize->getZExtValue());
+ return SDValue();
+}
+
+// Handle a memset of 1, 2, 4 or 8 bytes with the operands given by
+// Chain, Dst, ByteVal and Size. These cases are expected to use
+// MVI, MVHHI, MVHI and MVGHI respectively.
+static SDValue memsetStore(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, uint64_t ByteVal, uint64_t Size,
+ unsigned Align,
+ MachinePointerInfo DstPtrInfo) {
+ uint64_t StoreVal = ByteVal;
+ for (unsigned I = 1; I < Size; ++I)
+ StoreVal |= ByteVal << (I * 8);
+ return DAG.getStore(Chain, DL,
+ DAG.getConstant(StoreVal, MVT::getIntegerVT(Size * 8)),
+ Dst, DstPtrInfo, false, false, Align);
+}
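+
+// For example, ByteVal = 0xab with Size = 4 builds StoreVal = 0xabababab
+// and emits it as a single 32-bit store.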
+
+SDValue SystemZSelectionDAGInfo::
+EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Byte, SDValue Size,
+ unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const {
+ EVT PtrVT = Dst.getValueType();
+
+ if (IsVolatile)
+ return SDValue();
+
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ uint64_t Bytes = CSize->getZExtValue();
+ if (Bytes == 0)
+ return SDValue();
+ if (ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte)) {
+ // Handle cases that can be done using at most two of
+ // MVI, MVHHI, MVHI and MVGHI. The latter two can only be
+ // used if ByteVal is all zeros or all ones; in other cases,
+ // we can move at most 2 halfwords.
+ uint64_t ByteVal = CByte->getZExtValue();
+ if ((ByteVal == 0 || ByteVal == 255) ?
+ Bytes <= 16 && CountPopulation_64(Bytes) <= 2 :
+ Bytes <= 4) {
+ unsigned Size1 = Bytes == 16 ? 8 : 1 << findLastSet(Bytes);
+ unsigned Size2 = Bytes - Size1;
+ SDValue Chain1 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size1,
+ Align, DstPtrInfo);
+ if (Size2 == 0)
+ return Chain1;
+ Dst = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
+ DAG.getConstant(Size1, PtrVT));
+ DstPtrInfo = DstPtrInfo.getWithOffset(Size1);
+ SDValue Chain2 = memsetStore(DAG, DL, Chain, Dst, ByteVal, Size2,
+ std::min(Align, Size1), DstPtrInfo);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
+ }
+ } else {
+ // Handle one and two bytes using STC.
+ if (Bytes <= 2) {
+ SDValue Chain1 = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
+ false, false, Align);
+ if (Bytes == 1)
+ return Chain1;
+ SDValue Dst2 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
+ DAG.getConstant(1, PtrVT));
+ SDValue Chain2 = DAG.getStore(Chain, DL, Byte, Dst2,
+ DstPtrInfo.getWithOffset(1),
+ false, false, 1);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chain1, Chain2);
+ }
+ }
+ assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
+
+ // Handle the special case of a memset of 0, which can use XC.
+ ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte);
+ if (CByte && CByte->getZExtValue() == 0)
+ return emitMemMem(DAG, DL, SystemZISD::XC, SystemZISD::XC_LOOP,
+ Chain, Dst, Dst, Bytes);
+
+ // Copy the byte to the first location and then use MVC to copy
+ // it to the rest.
+ Chain = DAG.getStore(Chain, DL, Byte, Dst, DstPtrInfo,
+ false, false, Align);
+ SDValue DstPlus1 = DAG.getNode(ISD::ADD, DL, PtrVT, Dst,
+ DAG.getConstant(1, PtrVT));
+ return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
+ Chain, DstPlus1, Dst, Bytes - 1);
+ }
+ return SDValue();
+}
+
+// Use CLC to compare [Src1, Src1 + Size) with [Src2, Src2 + Size),
+// deciding whether to use a loop or straight-line code.
+static SDValue emitCLC(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2, uint64_t Size) {
+ SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ EVT PtrVT = Src1.getValueType();
+ // A two-CLC sequence is a clear win over a loop, not least because it
+ // needs only one branch. A three-CLC sequence needs the same number
+ // of branches as a loop (i.e. 2), but is shorter. That brings us to
+ // lengths greater than 768 bytes. It seems relatively likely that
+ // a difference will be found within the first 768 bytes, so we just
+ // optimize for the smallest number of branch instructions, in order
+ // to avoid polluting the prediction buffer too much. A loop only ever
+ // needs 2 branches, whereas a straight-line sequence would need 3 or more.
+ if (Size > 3 * 256)
+ return DAG.getNode(SystemZISD::CLC_LOOP, DL, VTs, Chain, Src1, Src2,
+ DAG.getConstant(Size, PtrVT),
+ DAG.getConstant(Size / 256, PtrVT));
+ return DAG.getNode(SystemZISD::CLC, DL, VTs, Chain, Src1, Src2,
+ DAG.getConstant(Size, PtrVT));
+}
+
+// Convert the current CC value into an integer that is 0 if CC == 0,
+// less than zero if CC == 1 and greater than zero if CC == 2. (The
+// comparison instructions used here never produce CC == 3.)
+// The sequence starts with IPM, which puts CC into bits 29 and 28
+// of an integer and clears bits 30 and 31.
+static SDValue addIPMSequence(SDLoc DL, SDValue Glue, SelectionDAG &DAG) {
+ SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
+ SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
+ DAG.getConstant(SystemZ::IPM_CC, MVT::i32));
+ SDValue ROTL = DAG.getNode(ISD::ROTL, DL, MVT::i32, SRL,
+ DAG.getConstant(31, MVT::i32));
+ return ROTL;
+}
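+
+// A minimal trace, assuming SystemZ::IPM_CC names bit position 28: IPM
+// leaves CC in bits 29-28, the shift moves it down to bits 1-0, and the
+// rotate left by 31 moves bit 0 into the sign bit, so CC == 0 yields 0,
+// CC == 1 yields 0x80000000 (negative) and CC == 2 yields 1 (positive).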
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2, SDValue Size,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
+ if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ uint64_t Bytes = CSize->getZExtValue();
+ assert(Bytes > 0 && "Caller should have handled 0-size case");
+ Chain = emitCLC(DAG, DL, Chain, Src1, Src2, Bytes);
+ SDValue Glue = Chain.getValue(1);
+ return std::make_pair(addIPMSequence(DL, Glue, DAG), Chain);
+ }
+ return std::make_pair(SDValue(), SDValue());
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue Char, SDValue Length,
+ MachinePointerInfo SrcPtrInfo) const {
+ // Use SRST to find the character. End is its address on success.
+ EVT PtrVT = Src.getValueType();
+ SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other, MVT::Glue);
+ Length = DAG.getZExtOrTrunc(Length, DL, PtrVT);
+ Char = DAG.getZExtOrTrunc(Char, DL, MVT::i32);
+ Char = DAG.getNode(ISD::AND, DL, MVT::i32, Char,
+ DAG.getConstant(255, MVT::i32));
+ SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, Length);
+ SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
+ Limit, Src, Char);
+ Chain = End.getValue(1);
+ SDValue Glue = End.getValue(2);
+
+ // Now select between End and null, depending on whether the character
+ // was found.
+ SmallVector<SDValue, 5> Ops;
+ Ops.push_back(End);
+ Ops.push_back(DAG.getConstant(0, PtrVT));
+ Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST, MVT::i32));
+ Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32));
+ Ops.push_back(Glue);
+ VTs = DAG.getVTList(PtrVT, MVT::Glue);
+ End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ return std::make_pair(End, Chain);
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dest, SDValue Src,
+ MachinePointerInfo DestPtrInfo,
+ MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
+ SDVTList VTs = DAG.getVTList(Dest.getValueType(), MVT::Other);
+ SDValue EndDest = DAG.getNode(SystemZISD::STPCPY, DL, VTs, Chain, Dest, Src,
+ DAG.getConstant(0, MVT::i32));
+ return std::make_pair(isStpcpy ? EndDest : Dest, EndDest.getValue(1));
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
+ SDVTList VTs = DAG.getVTList(Src1.getValueType(), MVT::Other, MVT::Glue);
+ SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src1, Src2,
+ DAG.getConstant(0, MVT::i32));
+ Chain = Unused.getValue(1);
+ SDValue Glue = Unused.getValue(2);
+ return std::make_pair(addIPMSequence(DL, Glue, DAG), Chain);
+}
+
+// Search from Src for a null character, stopping once Src reaches Limit.
+// Return a pair of values, the first being the number of nonnull characters
+// and the second being the out chain.
+//
+// This can be used for strlen by setting Limit to 0.
+static std::pair<SDValue, SDValue> getBoundedStrlen(SelectionDAG &DAG, SDLoc DL,
+ SDValue Chain, SDValue Src,
+ SDValue Limit) {
+ EVT PtrVT = Src.getValueType();
+ SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other, MVT::Glue);
+ SDValue End = DAG.getNode(SystemZISD::SEARCH_STRING, DL, VTs, Chain,
+ Limit, Src, DAG.getConstant(0, MVT::i32));
+ Chain = End.getValue(1);
+ SDValue Len = DAG.getNode(ISD::SUB, DL, PtrVT, End, Src);
+ return std::make_pair(Len, Chain);
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, MachinePointerInfo SrcPtrInfo) const {
+ EVT PtrVT = Src.getValueType();
+ return getBoundedStrlen(DAG, DL, Chain, Src, DAG.getConstant(0, PtrVT));
+}
+
+std::pair<SDValue, SDValue> SystemZSelectionDAGInfo::
+EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue MaxLength,
+ MachinePointerInfo SrcPtrInfo) const {
+ EVT PtrVT = Src.getValueType();
+ MaxLength = DAG.getZExtOrTrunc(MaxLength, DL, PtrVT);
+ SDValue Limit = DAG.getNode(ISD::ADD, DL, PtrVT, Src, MaxLength);
+ return getBoundedStrlen(DAG, DL, Chain, Src, Limit);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
new file mode 100644
index 000000000000..281d1e291dc9
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -0,0 +1,80 @@
+//===-- SystemZSelectionDAGInfo.h - SystemZ SelectionDAG Info ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SystemZ subclass for TargetSelectionDAGInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZSELECTIONDAGINFO_H
+#define SYSTEMZSELECTIONDAGINFO_H
+
+#include "llvm/Target/TargetSelectionDAGInfo.h"
+
+namespace llvm {
+
+class SystemZTargetMachine;
+
+class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo {
+public:
+ explicit SystemZSelectionDAGInfo(const SystemZTargetMachine &TM);
+ ~SystemZSelectionDAGInfo();
+
+ virtual
+ SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ bool IsVolatile, bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const
+ LLVM_OVERRIDE;
+
+ virtual SDValue
+ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
+ SDValue Chain, SDValue Dst, SDValue Byte,
+ SDValue Size, unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2, SDValue Size,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue Char, SDValue Length,
+ MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dest, SDValue Src,
+ MachinePointerInfo DestPtrInfo,
+ MachinePointerInfo SrcPtrInfo,
+ bool isStpcpy) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src1, SDValue Src2,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, MachinePointerInfo SrcPtrInfo) const
+ LLVM_OVERRIDE;
+
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue MaxLength,
+ MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
new file mode 100644
index 000000000000..537a54554045
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -0,0 +1,163 @@
+//===-- SystemZShortenInst.cpp - Instruction-shortening pass --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to replace instructions with shorter forms. For example,
+// IILF can be replaced with LLILL or LLILH if the constant fits and if the
+// other 32 bits of the GR64 destination are not live.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "systemz-shorten-inst"
+
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+using namespace llvm;
+
+namespace {
+ class SystemZShortenInst : public MachineFunctionPass {
+ public:
+ static char ID;
+ SystemZShortenInst(const SystemZTargetMachine &tm);
+
+ virtual const char *getPassName() const {
+ return "SystemZ Instruction Shortening";
+ }
+
+ bool processBlock(MachineBasicBlock *MBB);
+ bool runOnMachineFunction(MachineFunction &F);
+
+ private:
+ bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
+ unsigned LLIxL, unsigned LLIxH);
+
+ const SystemZInstrInfo *TII;
+
+ // LowGPRs[I] has bit N set if LLVM register I includes the low
+ // word of GPR N. HighGPRs is the same for the high word.
+ unsigned LowGPRs[SystemZ::NUM_TARGET_REGS];
+ unsigned HighGPRs[SystemZ::NUM_TARGET_REGS];
+ };
+
+ char SystemZShortenInst::ID = 0;
+} // end of anonymous namespace
+
+FunctionPass *llvm::createSystemZShortenInstPass(SystemZTargetMachine &TM) {
+ return new SystemZShortenInst(TM);
+}
+
+SystemZShortenInst::SystemZShortenInst(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(0), LowGPRs(), HighGPRs() {
+ // Set up LowGPRs and HighGPRs.
+ for (unsigned I = 0; I < 16; ++I) {
+ LowGPRs[SystemZMC::GR32Regs[I]] |= 1 << I;
+ LowGPRs[SystemZMC::GR64Regs[I]] |= 1 << I;
+ HighGPRs[SystemZMC::GRH32Regs[I]] |= 1 << I;
+ HighGPRs[SystemZMC::GR64Regs[I]] |= 1 << I;
+ if (unsigned GR128 = SystemZMC::GR128Regs[I]) {
+ LowGPRs[GR128] |= 3 << I;
+ HighGPRs[GR128] |= 3 << I;
+ }
+ }
+}
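+
+// For example, both the low word R5L and the full register R5D set bit 5
+// in LowGPRs, while the 128-bit pair R4Q covers GPRs 4 and 5 and so sets
+// bits 4 and 5 in both maps.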
+
+// MI loads one word of a GPR using an IIxF instruction and LLIxL and LLIxH
+// are the halfword immediate loads for the same word. Try to use one of them
+// instead of IIxF. If MI loads the high word, GPRMap[X] is the set of high
+// words referenced by LLVM register X while LiveOther is the mask of low
+// words that are currently live, and vice versa.
+bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned *GPRMap,
+ unsigned LiveOther, unsigned LLIxL,
+ unsigned LLIxH) {
+ unsigned Reg = MI.getOperand(0).getReg();
+ assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
+ unsigned GPRs = GPRMap[Reg];
+ assert(GPRs != 0 && "Register must be a GPR");
+ if (GPRs & LiveOther)
+ return false;
+
+ uint64_t Imm = MI.getOperand(1).getImm();
+ if (SystemZ::isImmLL(Imm)) {
+ MI.setDesc(TII->get(LLIxL));
+ MI.getOperand(0).setReg(SystemZMC::getRegAsGR64(Reg));
+ return true;
+ }
+ if (SystemZ::isImmLH(Imm)) {
+ MI.setDesc(TII->get(LLIxH));
+ MI.getOperand(0).setReg(SystemZMC::getRegAsGR64(Reg));
+ MI.getOperand(1).setImm(Imm >> 16);
+ return true;
+ }
+ return false;
+}
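+
+// For example (hypothetical operands): "iilf %r2, 0x1234" becomes
+// "llill %r2, 0x1234" once the high word of %r2 is known to be dead,
+// and "iilf %r2, 0x12340000" likewise becomes "llilh %r2, 0x1234".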
+
+// Process all instructions in MBB. Return true if something changed.
+bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
+ bool Changed = false;
+
+ // Work out which words are live on exit from the block.
+ unsigned LiveLow = 0;
+ unsigned LiveHigh = 0;
+ for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
+ SE = MBB->succ_end(); SI != SE; ++SI) {
+ for (MachineBasicBlock::livein_iterator LI = (*SI)->livein_begin(),
+ LE = (*SI)->livein_end(); LI != LE; ++LI) {
+ unsigned Reg = *LI;
+ assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
+ LiveLow |= LowGPRs[Reg];
+ LiveHigh |= HighGPRs[Reg];
+ }
+ }
+
+ // Iterate backwards through the block looking for instructions to change.
+ for (MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin(),
+ MBBE = MBB->rend(); MBBI != MBBE; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SystemZ::IILF)
+ Changed |= shortenIIF(MI, LowGPRs, LiveHigh, SystemZ::LLILL,
+ SystemZ::LLILH);
+ else if (Opcode == SystemZ::IIHF)
+ Changed |= shortenIIF(MI, HighGPRs, LiveLow, SystemZ::LLIHL,
+ SystemZ::LLIHH);
+ unsigned UsedLow = 0;
+ unsigned UsedHigh = 0;
+ for (MachineInstr::mop_iterator MOI = MI.operands_begin(),
+ MOE = MI.operands_end(); MOI != MOE; ++MOI) {
+ MachineOperand &MO = *MOI;
+ if (MO.isReg()) {
+ if (unsigned Reg = MO.getReg()) {
+ assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
+ if (MO.isDef()) {
+ LiveLow &= ~LowGPRs[Reg];
+ LiveHigh &= ~HighGPRs[Reg];
+ } else if (!MO.isUndef()) {
+ UsedLow |= LowGPRs[Reg];
+ UsedHigh |= HighGPRs[Reg];
+ }
+ }
+ }
+ }
+ LiveLow |= UsedLow;
+ LiveHigh |= UsedHigh;
+ }
+
+ return Changed;
+}
+
+bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
+ TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
+
+ bool Changed = false;
+ for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
+ MFI != MFE; ++MFI)
+ Changed |= processBlock(MFI);
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
new file mode 100644
index 000000000000..3971d5e2a5fa
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -0,0 +1,67 @@
+//===-- SystemZSubtarget.cpp - SystemZ subtarget information --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZSubtarget.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/Host.h"
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "SystemZGenSubtargetInfo.inc"
+
+using namespace llvm;
+
+// Pin the vtable to this file.
+void SystemZSubtarget::anchor() {}
+
+SystemZSubtarget::SystemZSubtarget(const std::string &TT,
+ const std::string &CPU,
+ const std::string &FS)
+ : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
+ HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
+ TargetTriple(TT) {
+ std::string CPUName = CPU;
+ if (CPUName.empty())
+ CPUName = "generic";
+#if defined(__linux__) && defined(__s390x__)
+ if (CPUName == "generic")
+ CPUName = sys::getHostCPUName();
+#endif
+
+ // Parse features string.
+ ParseSubtargetFeatures(CPUName, FS);
+}
+
+// Return true if GV binds locally under reloc model RM.
+static bool bindsLocally(const GlobalValue *GV, Reloc::Model RM) {
+ // For non-PIC, all symbols bind locally.
+ if (RM == Reloc::Static)
+ return true;
+
+ return GV->hasLocalLinkage() || !GV->hasDefaultVisibility();
+}
+
+bool SystemZSubtarget::isPC32DBLSymbol(const GlobalValue *GV,
+ Reloc::Model RM,
+ CodeModel::Model CM) const {
+ // PC32DBL accesses require the low bit to be clear. Note that a zero
+ // value selects the default alignment and is therefore OK.
+ if (GV->getAlignment() == 1)
+ return false;
+
+ // For the small model, all locally-binding symbols are in range.
+ if (CM == CodeModel::Small)
+ return bindsLocally(GV, RM);
+
+ // For Medium and above, assume that the symbol is not within the 4GB range.
+ // Taking the address of locally-defined text would be OK, but that
+ // case isn't easy to detect.
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
new file mode 100644
index 000000000000..5817491d4585
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
@@ -0,0 +1,70 @@
+//===-- SystemZSubtarget.h - SystemZ subtarget information -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SystemZ specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SYSTEMZSUBTARGET_H
+#define SYSTEMZSUBTARGET_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "SystemZGenSubtargetInfo.inc"
+
+namespace llvm {
+class GlobalValue;
+class StringRef;
+
+class SystemZSubtarget : public SystemZGenSubtargetInfo {
+ virtual void anchor();
+protected:
+ bool HasDistinctOps;
+ bool HasLoadStoreOnCond;
+ bool HasHighWord;
+ bool HasFPExtension;
+
+private:
+ Triple TargetTriple;
+
+public:
+ SystemZSubtarget(const std::string &TT, const std::string &CPU,
+ const std::string &FS);
+
+ // This is important for reducing register pressure in vector code.
+ virtual bool useAA() const LLVM_OVERRIDE { return true; }
+
+ // Automatically generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+
+ // Return true if the target has the distinct-operands facility.
+ bool hasDistinctOps() const { return HasDistinctOps; }
+
+ // Return true if the target has the load/store-on-condition facility.
+ bool hasLoadStoreOnCond() const { return HasLoadStoreOnCond; }
+
+ // Return true if the target has the high-word facility.
+ bool hasHighWord() const { return HasHighWord; }
+
+ // Return true if the target has the floating-point extension facility.
+ bool hasFPExtension() const { return HasFPExtension; }
+
+ // Return true if GV can be accessed using LARL for reloc model RM
+ // and code model CM.
+ bool isPC32DBLSymbol(const GlobalValue *GV, Reloc::Model RM,
+ CodeModel::Model CM) const;
+
+ bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
new file mode 100644
index 000000000000..dee92e960c54
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -0,0 +1,108 @@
+//===-- SystemZTargetMachine.cpp - Define TargetMachine for SystemZ -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZTargetMachine.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+extern "C" void LLVMInitializeSystemZTarget() {
+ // Register the target.
+ RegisterTargetMachine<SystemZTargetMachine> X(TheSystemZTarget);
+}
+
+SystemZTargetMachine::SystemZTargetMachine(const Target &T, StringRef TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS),
+ // Make sure that global data has at least 16 bits of alignment by default,
+ // so that we can refer to it using LARL. We don't have any special
+ // requirements for stack variables though.
+ DL("E-p:64:64:64-i1:8:16-i8:8:16-i16:16-i32:32-i64:64"
+ "-f32:32-f64:64-f128:64-a0:8:16-n32:64"),
+ InstrInfo(*this), TLInfo(*this), TSInfo(*this),
+ FrameLowering(*this, Subtarget) {
+ initAsmInfo();
+}
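+
+// For reference, the pieces of the layout string: "E" selects big-endian,
+// "p:64:64:64" gives 64-bit pointers, the "iN:A:P" entries set the integer
+// ABI/preferred alignments (the preferred 16-bit alignment on i1/i8/i16 is
+// what provides the LARL guarantee mentioned above), "f128:64" caps long
+// double alignment at 8 bytes, "a0:8:16" covers aggregates and "n32:64"
+// lists the native integer widths.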
+
+namespace {
+/// SystemZ Code Generator Pass Configuration Options.
+class SystemZPassConfig : public TargetPassConfig {
+public:
+ SystemZPassConfig(SystemZTargetMachine *TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ SystemZTargetMachine &getSystemZTargetMachine() const {
+ return getTM<SystemZTargetMachine>();
+ }
+
+ virtual void addIRPasses() LLVM_OVERRIDE;
+ virtual bool addInstSelector() LLVM_OVERRIDE;
+ virtual bool addPreSched2() LLVM_OVERRIDE;
+ virtual bool addPreEmitPass() LLVM_OVERRIDE;
+};
+} // end anonymous namespace
+
+void SystemZPassConfig::addIRPasses() {
+ TargetPassConfig::addIRPasses();
+ addPass(createPartiallyInlineLibCallsPass());
+}
+
+bool SystemZPassConfig::addInstSelector() {
+ addPass(createSystemZISelDag(getSystemZTargetMachine(), getOptLevel()));
+ return false;
+}
+
+bool SystemZPassConfig::addPreSched2() {
+ if (getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
+ addPass(&IfConverterID);
+ return true;
+}
+
+bool SystemZPassConfig::addPreEmitPass() {
+ // We eliminate comparisons here rather than earlier because some
+ // transformations can change the set of available CC values and we
+ // generally want those transformations to have priority. This is
+ // especially true in the commonest case where the result of the comparison
+ // is used by a single in-range branch instruction, since we will then
+ // be able to fuse the compare and the branch instead.
+ //
+ // For example, two-address NILF can sometimes be converted into
+ // three-address RISBLG. NILF produces a CC value that indicates whether
+ // the low word is zero, but RISBLG does not modify CC at all. On the
+ // other hand, 64-bit ANDs like NILL can sometimes be converted to RISBG.
+ // The CC value produced by NILL isn't useful for our purposes, but the
+ // value produced by RISBG can be used for any comparison with zero
+ // (not just equality). So there are some transformations that lose
+ // CC values (while still being worthwhile) and others that happen to make
+ // the CC result more useful than it was originally.
+ //
+ // Another reason is that we only want to use BRANCH ON COUNT in cases
+ // where we know that the count register is not going to be spilled.
+ //
+ // Doing it so late makes it more likely that a register will be reused
+ // between the comparison and the branch, but it isn't clear whether
+ // preventing that would be a win or not.
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createSystemZElimComparePass(getSystemZTargetMachine()));
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createSystemZShortenInstPass(getSystemZTargetMachine()));
+ addPass(createSystemZLongBranchPass(getSystemZTargetMachine()));
+ return true;
+}
+
+TargetPassConfig *SystemZTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new SystemZPassConfig(this, PM);
+}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
new file mode 100644
index 000000000000..a99a98e08477
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -0,0 +1,74 @@
+//==- SystemZTargetMachine.h - Define TargetMachine for SystemZ ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SystemZ specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef SYSTEMZTARGETMACHINE_H
+#define SYSTEMZTARGETMACHINE_H
+
+#include "SystemZFrameLowering.h"
+#include "SystemZISelLowering.h"
+#include "SystemZInstrInfo.h"
+#include "SystemZRegisterInfo.h"
+#include "SystemZSubtarget.h"
+#include "SystemZSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class SystemZTargetMachine : public LLVMTargetMachine {
+ SystemZSubtarget Subtarget;
+ const DataLayout DL;
+ SystemZInstrInfo InstrInfo;
+ SystemZTargetLowering TLInfo;
+ SystemZSelectionDAGInfo TSInfo;
+ SystemZFrameLowering FrameLowering;
+
+public:
+ SystemZTargetMachine(const Target &T, StringRef TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+
+ // Override TargetMachine.
+ virtual const TargetFrameLowering *getFrameLowering() const LLVM_OVERRIDE {
+ return &FrameLowering;
+ }
+ virtual const SystemZInstrInfo *getInstrInfo() const LLVM_OVERRIDE {
+ return &InstrInfo;
+ }
+ virtual const SystemZSubtarget *getSubtargetImpl() const LLVM_OVERRIDE {
+ return &Subtarget;
+ }
+ virtual const DataLayout *getDataLayout() const LLVM_OVERRIDE {
+ return &DL;
+ }
+ virtual const SystemZRegisterInfo *getRegisterInfo() const LLVM_OVERRIDE {
+ return &InstrInfo.getRegisterInfo();
+ }
+ virtual const SystemZTargetLowering *getTargetLowering() const LLVM_OVERRIDE {
+ return &TLInfo;
+ }
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const
+ LLVM_OVERRIDE {
+ return &TSInfo;
+ }
+
+ // Override LLVMTargetMachine
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM) LLVM_OVERRIDE;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp b/contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp
new file mode 100644
index 000000000000..8f9aa2811d05
--- /dev/null
+++ b/contrib/llvm/lib/Target/SystemZ/TargetInfo/SystemZTargetInfo.cpp
@@ -0,0 +1,20 @@
+//===-- SystemZTargetInfo.cpp - SystemZ target implementation -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SystemZ.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+Target llvm::TheSystemZTarget;
+
+extern "C" void LLVMInitializeSystemZTargetInfo() {
+ RegisterTarget<Triple::systemz, /*HasJIT=*/true>
+ X(TheSystemZTarget, "systemz", "SystemZ");
+}