author     Dimitry Andric <dim@FreeBSD.org>    2014-11-24 17:02:24 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2014-11-24 17:02:24 +0000
commit     91bc56ed825ba56b3cc264aa5c95ab84f86832ab (patch)
tree       4df130b28021d86e13bf4565ef58c1c5a5e093b4 /contrib/llvm/lib/Target/SystemZ
parent     9efc7e72bb1daf5d6019871d9c93a1c488a11229 (diff)
parent     5ca98fd98791947eba83a1ed3f2c8191ef7afa6c (diff)
Merge llvm 3.5.0 release from ^/vendor/llvm/dist, resolve conflicts, and
preserve our customizations, where necessary.
Notes: svn path=/projects/clang350-import/; revision=274968
Diffstat (limited to 'contrib/llvm/lib/Target/SystemZ')
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp        219
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp   21
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp      4
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h        6
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp   35
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp       2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h         5
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp  87
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h         22
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp 35
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp   10
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h     78
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/README.txt                              7
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZ.h                              187
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZ.td                              10
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp                  18
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h                    22
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h                   14
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td                  10
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp            2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h             20
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp                122
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp              136
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h                 66
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp               110
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp               872
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h                 425
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h                   2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td                       69
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td                 128
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp                    60
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h                     284
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td                    201
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp                  210
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp                   2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h                     2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h             2
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZOperands.td                      29
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZOperators.td                     27
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td                      15
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td                    20
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp                 50
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h                   50
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td                  25
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp             21
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h               43
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp                  67
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp                    35
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h                      36
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp                25
-rw-r--r--  contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h                  43
51 files changed, 2188 insertions, 1803 deletions
diff --git a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
index 763f40c7ff44..758be41ce263 100644
--- a/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/AsmParser/SystemZAsmParser.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
// Return true if Expr is in the range [MinValue, MaxValue].
static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) {
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
int64_t Value = CE->getValue();
return Value >= MinValue && Value <= MaxValue;
}
@@ -104,55 +104,55 @@ private:
MemOp Mem;
};
- SystemZOperand(OperandKind kind, SMLoc startLoc, SMLoc endLoc)
- : Kind(kind), StartLoc(startLoc), EndLoc(endLoc)
- {}
-
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
// Add as immediates when possible. Null MCExpr = 0.
- if (Expr == 0)
+ if (!Expr)
Inst.addOperand(MCOperand::CreateImm(0));
- else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ else if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
else
Inst.addOperand(MCOperand::CreateExpr(Expr));
}
public:
+ SystemZOperand(OperandKind kind, SMLoc startLoc, SMLoc endLoc)
+ : Kind(kind), StartLoc(startLoc), EndLoc(endLoc) {}
+
// Create particular kinds of operand.
- static SystemZOperand *createInvalid(SMLoc StartLoc, SMLoc EndLoc) {
- return new SystemZOperand(KindInvalid, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand> createInvalid(SMLoc StartLoc,
+ SMLoc EndLoc) {
+ return make_unique<SystemZOperand>(KindInvalid, StartLoc, EndLoc);
}
- static SystemZOperand *createToken(StringRef Str, SMLoc Loc) {
- SystemZOperand *Op = new SystemZOperand(KindToken, Loc, Loc);
+ static std::unique_ptr<SystemZOperand> createToken(StringRef Str, SMLoc Loc) {
+ auto Op = make_unique<SystemZOperand>(KindToken, Loc, Loc);
Op->Token.Data = Str.data();
Op->Token.Length = Str.size();
return Op;
}
- static SystemZOperand *createReg(RegisterKind Kind, unsigned Num,
- SMLoc StartLoc, SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindReg, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createReg(RegisterKind Kind, unsigned Num, SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindReg, StartLoc, EndLoc);
Op->Reg.Kind = Kind;
Op->Reg.Num = Num;
return Op;
}
- static SystemZOperand *createAccessReg(unsigned Num, SMLoc StartLoc,
- SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindAccessReg, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createAccessReg(unsigned Num, SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindAccessReg, StartLoc, EndLoc);
Op->AccessReg = Num;
return Op;
}
- static SystemZOperand *createImm(const MCExpr *Expr, SMLoc StartLoc,
- SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindImm, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createImm(const MCExpr *Expr, SMLoc StartLoc, SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindImm, StartLoc, EndLoc);
Op->Imm = Expr;
return Op;
}
- static SystemZOperand *createMem(RegisterKind RegKind, unsigned Base,
- const MCExpr *Disp, unsigned Index,
- const MCExpr *Length, SMLoc StartLoc,
- SMLoc EndLoc) {
- SystemZOperand *Op = new SystemZOperand(KindMem, StartLoc, EndLoc);
+ static std::unique_ptr<SystemZOperand>
+ createMem(RegisterKind RegKind, unsigned Base, const MCExpr *Disp,
+ unsigned Index, const MCExpr *Length, SMLoc StartLoc,
+ SMLoc EndLoc) {
+ auto Op = make_unique<SystemZOperand>(KindMem, StartLoc, EndLoc);
Op->Mem.RegKind = RegKind;
Op->Mem.Base = Base;
Op->Mem.Index = Index;
@@ -162,7 +162,7 @@ public:
}
// Token operands
- virtual bool isToken() const LLVM_OVERRIDE {
+ bool isToken() const override {
return Kind == KindToken;
}
StringRef getToken() const {
@@ -171,13 +171,13 @@ public:
}
// Register operands.
- virtual bool isReg() const LLVM_OVERRIDE {
+ bool isReg() const override {
return Kind == KindReg;
}
bool isReg(RegisterKind RegKind) const {
return Kind == KindReg && Reg.Kind == RegKind;
}
- virtual unsigned getReg() const LLVM_OVERRIDE {
+ unsigned getReg() const override {
assert(Kind == KindReg && "Not a register");
return Reg.Num;
}
@@ -189,7 +189,7 @@ public:
}
// Immediate operands.
- virtual bool isImm() const LLVM_OVERRIDE {
+ bool isImm() const override {
return Kind == KindImm;
}
bool isImm(int64_t MinValue, int64_t MaxValue) const {
@@ -201,14 +201,14 @@ public:
}
// Memory operands.
- virtual bool isMem() const LLVM_OVERRIDE {
+ bool isMem() const override {
return Kind == KindMem;
}
bool isMem(RegisterKind RegKind, MemoryKind MemKind) const {
return (Kind == KindMem &&
Mem.RegKind == RegKind &&
(MemKind == BDXMem || !Mem.Index) &&
- (MemKind == BDLMem) == (Mem.Length != 0));
+ (MemKind == BDLMem) == (Mem.Length != nullptr));
}
bool isMemDisp12(RegisterKind RegKind, MemoryKind MemKind) const {
return isMem(RegKind, MemKind) && inRange(Mem.Disp, 0, 0xfff);
@@ -221,9 +221,9 @@ public:
}
// Override MCParsedAsmOperand.
- virtual SMLoc getStartLoc() const LLVM_OVERRIDE { return StartLoc; }
- virtual SMLoc getEndLoc() const LLVM_OVERRIDE { return EndLoc; }
- virtual void print(raw_ostream &OS) const LLVM_OVERRIDE;
+ SMLoc getStartLoc() const override { return StartLoc; }
+ SMLoc getEndLoc() const override { return EndLoc; }
+ void print(raw_ostream &OS) const override;
// Used by the TableGen code to add particular types of operand
// to an instruction.
@@ -313,25 +313,24 @@ private:
bool parseRegister(Register &Reg, RegisterGroup Group, const unsigned *Regs,
bool IsAddress = false);
- OperandMatchResultTy
- parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- RegisterGroup Group, const unsigned *Regs, RegisterKind Kind);
+ OperandMatchResultTy parseRegister(OperandVector &Operands,
+ RegisterGroup Group, const unsigned *Regs,
+ RegisterKind Kind);
bool parseAddress(unsigned &Base, const MCExpr *&Disp,
unsigned &Index, const MCExpr *&Length,
const unsigned *Regs, RegisterKind RegKind);
- OperandMatchResultTy
- parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- const unsigned *Regs, RegisterKind RegKind,
- MemoryKind MemKind);
+ OperandMatchResultTy parseAddress(OperandVector &Operands,
+ const unsigned *Regs, RegisterKind RegKind,
+ MemoryKind MemKind);
- bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic);
+ bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
public:
SystemZAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(sti), Parser(parser) {
MCAsmParserExtension::Initialize(Parser);
@@ -340,95 +339,72 @@ public:
}
// Override MCTargetAsmParser.
- virtual bool ParseDirective(AsmToken DirectiveID) LLVM_OVERRIDE;
- virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
- SMLoc &EndLoc) LLVM_OVERRIDE;
- virtual bool ParseInstruction(ParseInstructionInfo &Info,
- StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands)
- LLVM_OVERRIDE;
- virtual bool
- MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) LLVM_OVERRIDE;
+ bool ParseDirective(AsmToken DirectiveID) override;
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) override;
// Used by the TableGen code to parse particular operand types.
- OperandMatchResultTy
- parseGR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGR32(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, GR32Reg);
}
- OperandMatchResultTy
- parseGRH32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGRH32(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GRH32Regs, GRH32Reg);
}
- OperandMatchResultTy
- parseGRX32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGRX32(OperandVector &Operands) {
llvm_unreachable("GRX32 should only be used for pseudo instructions");
}
- OperandMatchResultTy
- parseGR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGR64(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, GR64Reg);
}
- OperandMatchResultTy
- parseGR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseGR128(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR128Regs, GR128Reg);
}
- OperandMatchResultTy
- parseADDR32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseADDR32(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR32Regs, ADDR32Reg);
}
- OperandMatchResultTy
- parseADDR64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseADDR64(OperandVector &Operands) {
return parseRegister(Operands, RegGR, SystemZMC::GR64Regs, ADDR64Reg);
}
- OperandMatchResultTy
- parseADDR128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseADDR128(OperandVector &Operands) {
llvm_unreachable("Shouldn't be used as an operand");
}
- OperandMatchResultTy
- parseFP32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseFP32(OperandVector &Operands) {
return parseRegister(Operands, RegFP, SystemZMC::FP32Regs, FP32Reg);
}
- OperandMatchResultTy
- parseFP64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseFP64(OperandVector &Operands) {
return parseRegister(Operands, RegFP, SystemZMC::FP64Regs, FP64Reg);
}
- OperandMatchResultTy
- parseFP128(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseFP128(OperandVector &Operands) {
return parseRegister(Operands, RegFP, SystemZMC::FP128Regs, FP128Reg);
}
- OperandMatchResultTy
- parseBDAddr32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDAddr32(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR32Regs, ADDR32Reg, BDMem);
}
- OperandMatchResultTy
- parseBDAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDAddr64(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDMem);
}
- OperandMatchResultTy
- parseBDXAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDXAddr64(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDXMem);
}
- OperandMatchResultTy
- parseBDLAddr64(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseBDLAddr64(OperandVector &Operands) {
return parseAddress(Operands, SystemZMC::GR64Regs, ADDR64Reg, BDLMem);
}
- OperandMatchResultTy
- parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
- OperandMatchResultTy
- parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- int64_t MinVal, int64_t MaxVal);
- OperandMatchResultTy
- parsePCRel16(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parseAccessReg(OperandVector &Operands);
+ OperandMatchResultTy parsePCRel(OperandVector &Operands, int64_t MinVal,
+ int64_t MaxVal);
+ OperandMatchResultTy parsePCRel16(OperandVector &Operands) {
return parsePCRel(Operands, -(1LL << 16), (1LL << 16) - 1);
}
- OperandMatchResultTy
- parsePCRel32(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+ OperandMatchResultTy parsePCRel32(OperandVector &Operands) {
return parsePCRel(Operands, -(1LL << 32), (1LL << 32) - 1);
}
};
-}
+} // end anonymous namespace
#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
@@ -498,9 +474,8 @@ bool SystemZAsmParser::parseRegister(Register &Reg, RegisterGroup Group,
// Parse a register and add it to Operands. The other arguments are as above.
SystemZAsmParser::OperandMatchResultTy
-SystemZAsmParser::parseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- RegisterGroup Group, const unsigned *Regs,
- RegisterKind Kind) {
+SystemZAsmParser::parseRegister(OperandVector &Operands, RegisterGroup Group,
+ const unsigned *Regs, RegisterKind Kind) {
if (Parser.getTok().isNot(AsmToken::Percent))
return MatchOperand_NoMatch;
@@ -528,7 +503,7 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp,
// Parse the optional base and index.
Index = 0;
Base = 0;
- Length = 0;
+ Length = nullptr;
if (getLexer().is(AsmToken::LParen)) {
Parser.Lex();
@@ -567,9 +542,8 @@ bool SystemZAsmParser::parseAddress(unsigned &Base, const MCExpr *&Disp,
// Parse a memory operand and add it to Operands. The other arguments
// are as above.
SystemZAsmParser::OperandMatchResultTy
-SystemZAsmParser::parseAddress(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- const unsigned *Regs, RegisterKind RegKind,
- MemoryKind MemKind) {
+SystemZAsmParser::parseAddress(OperandVector &Operands, const unsigned *Regs,
+ RegisterKind RegKind, MemoryKind MemKind) {
SMLoc StartLoc = Parser.getTok().getLoc();
unsigned Base, Index;
const MCExpr *Disp;
@@ -623,9 +597,9 @@ bool SystemZAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
return false;
}
-bool SystemZAsmParser::
-ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+bool SystemZAsmParser::ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
Operands.push_back(SystemZOperand::createToken(Name, NameLoc));
// Read the remaining operands.
@@ -656,9 +630,8 @@ ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc,
return false;
}
-bool SystemZAsmParser::
-parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- StringRef Mnemonic) {
+bool SystemZAsmParser::parseOperand(OperandVector &Operands,
+ StringRef Mnemonic) {
// Check if the current operand has a custom associated parser, if so, try to
// custom parse the operand, or fallback to the general approach.
OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
@@ -701,11 +674,11 @@ parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return false;
}
-bool SystemZAsmParser::
-MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
- SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- MCStreamer &Out, unsigned &ErrorInfo,
- bool MatchingInlineAsm) {
+bool SystemZAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ unsigned &ErrorInfo,
+ bool MatchingInlineAsm) {
MCInst Inst;
unsigned MatchResult;
@@ -715,7 +688,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
default: break;
case Match_Success:
Inst.setLoc(IDLoc);
- Out.EmitInstruction(Inst);
+ Out.EmitInstruction(Inst, STI);
return false;
case Match_MissingFeature: {
@@ -740,7 +713,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
if (ErrorInfo >= Operands.size())
return Error(IDLoc, "too few operands for instruction");
- ErrorLoc = ((SystemZOperand*)Operands[ErrorInfo])->getStartLoc();
+ ErrorLoc = ((SystemZOperand &)*Operands[ErrorInfo]).getStartLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
}
@@ -754,13 +727,13 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
llvm_unreachable("Unexpected match type");
}
-SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
-parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parseAccessReg(OperandVector &Operands) {
if (Parser.getTok().isNot(AsmToken::Percent))
return MatchOperand_NoMatch;
Register Reg;
- if (parseRegister(Reg, RegAccess, 0))
+ if (parseRegister(Reg, RegAccess, nullptr))
return MatchOperand_ParseFail;
Operands.push_back(SystemZOperand::createAccessReg(Reg.Num,
@@ -769,9 +742,9 @@ parseAccessReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
return MatchOperand_Success;
}
-SystemZAsmParser::OperandMatchResultTy SystemZAsmParser::
-parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
- int64_t MinVal, int64_t MaxVal) {
+SystemZAsmParser::OperandMatchResultTy
+SystemZAsmParser::parsePCRel(OperandVector &Operands, int64_t MinVal,
+ int64_t MaxVal) {
MCContext &Ctx = getContext();
MCStreamer &Out = getStreamer();
const MCExpr *Expr;
@@ -781,7 +754,7 @@ parsePCRel(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
// For consistency with the GNU assembler, treat immediates as offsets
// from ".".
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) {
+ if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
int64_t Value = CE->getValue();
if ((Value & 1) || Value < MinVal || Value > MaxVal) {
Error(StartLoc, "offset out of range");
diff --git a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
index fc3c38d2f343..2350776e10fe 100644
--- a/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/Disassembler/SystemZDisassembler.cpp
@@ -17,28 +17,29 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-disassembler"
+
typedef MCDisassembler::DecodeStatus DecodeStatus;
namespace {
class SystemZDisassembler : public MCDisassembler {
public:
- SystemZDisassembler(const MCSubtargetInfo &STI)
- : MCDisassembler(STI) {}
+ SystemZDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
virtual ~SystemZDisassembler() {}
// Override MCDisassembler.
- virtual DecodeStatus getInstruction(MCInst &instr,
- uint64_t &size,
- const MemoryObject &region,
- uint64_t address,
- raw_ostream &vStream,
- raw_ostream &cStream) const LLVM_OVERRIDE;
+ DecodeStatus getInstruction(MCInst &instr, uint64_t &size,
+ const MemoryObject &region, uint64_t address,
+ raw_ostream &vStream,
+ raw_ostream &cStream) const override;
};
} // end anonymous namespace
static MCDisassembler *createSystemZDisassembler(const Target &T,
- const MCSubtargetInfo &STI) {
- return new SystemZDisassembler(STI);
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new SystemZDisassembler(STI, Ctx);
}
extern "C" void LLVMInitializeSystemZDisassembler() {
diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
index e1e64d3ac177..d2ba9b6f54c3 100644
--- a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.cpp
@@ -7,8 +7,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "asm-printer"
-
#include "SystemZInstPrinter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
@@ -16,6 +14,8 @@
using namespace llvm;
+#define DEBUG_TYPE "asm-printer"
+
#include "SystemZGenAsmWriter.inc"
void SystemZInstPrinter::printAddress(unsigned Base, int64_t Disp,
diff --git a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
index 734ecf0ff230..dce482b216fb 100644
--- a/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
+++ b/contrib/llvm/lib/Target/SystemZ/InstPrinter/SystemZInstPrinter.h
@@ -38,10 +38,8 @@ public:
static void printOperand(const MCOperand &MO, raw_ostream &O);
// Override MCInstPrinter.
- virtual void printRegName(raw_ostream &O, unsigned RegNo) const
- LLVM_OVERRIDE;
- virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot)
- LLVM_OVERRIDE;
+ void printRegName(raw_ostream &O, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot) override;
private:
// Print various types of operand.
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
index 26a8faeea10b..6e7268de55c1 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmBackend.cpp
@@ -43,37 +43,27 @@ public:
: OSABI(osABI) {}
// Override MCAsmBackend
- virtual unsigned getNumFixupKinds() const LLVM_OVERRIDE {
+ unsigned getNumFixupKinds() const override {
return SystemZ::NumTargetFixupKinds;
}
- virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const
- LLVM_OVERRIDE;
- virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value) const LLVM_OVERRIDE;
- virtual bool mayNeedRelaxation(const MCInst &Inst) const LLVM_OVERRIDE {
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
+ void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const override;
+ bool mayNeedRelaxation(const MCInst &Inst) const override {
return false;
}
- virtual bool fixupNeedsRelaxation(const MCFixup &Fixup,
- uint64_t Value,
- const MCRelaxableFragment *Fragment,
- const MCAsmLayout &Layout) const
- LLVM_OVERRIDE {
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *Fragment,
+ const MCAsmLayout &Layout) const override {
return false;
}
- virtual void relaxInstruction(const MCInst &Inst,
- MCInst &Res) const LLVM_OVERRIDE {
+ void relaxInstruction(const MCInst &Inst, MCInst &Res) const override {
llvm_unreachable("SystemZ does do not have assembler relaxation");
}
- virtual bool writeNopData(uint64_t Count,
- MCObjectWriter *OW) const LLVM_OVERRIDE;
- virtual MCObjectWriter *createObjectWriter(raw_ostream &OS) const
- LLVM_OVERRIDE {
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+ MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
return createSystemZObjectWriter(OS, OSABI);
}
- virtual bool doesSectionRequireSymbols(const MCSection &Section) const
- LLVM_OVERRIDE {
- return false;
- }
};
} // end anonymous namespace
@@ -95,7 +85,8 @@ SystemZMCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
}
void SystemZMCAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
- unsigned DataSize, uint64_t Value) const {
+ unsigned DataSize, uint64_t Value,
+ bool IsPCRel) const {
MCFixupKind Kind = Fixup.getKind();
unsigned Offset = Fixup.getOffset();
unsigned Size = (getFixupKindInfo(Kind).TargetSize + 7) / 8;
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index 965c41e2d151..c46a36bdd23d 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -19,8 +19,6 @@ SystemZMCAsmInfo::SystemZMCAsmInfo(StringRef TT) {
IsLittleEndian = false;
CommentString = "#";
- GlobalPrefix = "";
- PrivateGlobalPrefix = ".L";
ZeroDirective = "\t.space\t";
Data64bitsDirective = "\t.quad\t";
UsesELFSectionDirectiveForBSS = true;
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
index b9ac92a6934f..1de97afbfe0a 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
@@ -21,10 +21,9 @@ public:
explicit SystemZMCAsmInfo(StringRef TT);
// Override MCAsmInfo;
- virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const
- LLVM_OVERRIDE;
+ const MCSection *getNonexecutableStackSection(MCContext &Ctx) const override;
};
-} // namespace llvm
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
index f07ea7b31e6a..27b4bd855b3e 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCCodeEmitter.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "MCTargetDesc/SystemZMCFixups.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -21,6 +20,8 @@
using namespace llvm;
+#define DEBUG_TYPE "mccodeemitter"
+
namespace {
class SystemZMCCodeEmitter : public MCCodeEmitter {
const MCInstrInfo &MCII;
@@ -34,34 +35,41 @@ public:
~SystemZMCCodeEmitter() {}
// OVerride MCCodeEmitter.
- virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const
- LLVM_OVERRIDE;
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
private:
// Automatically generated by TableGen.
uint64_t getBinaryCodeForInstr(const MCInst &MI,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// Called by the TableGen code to get the binary encoding of operand
// MO in MI. Fixups is the list of fixups against MI.
uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// Called by the TableGen code to get the binary encoding of an address.
// The index or length, if any, is encoded first, followed by the base,
// followed by the displacement. In a 20-bit displacement,
// the low 12 bits are encoded before the high 8 bits.
uint64_t getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
uint64_t getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
// Operand OpNum of MI needs a PC-relative fixup of kind Kind at
// Offset bytes from the start of MI. Add the fixup to Fixups
@@ -72,15 +80,17 @@ private:
unsigned Kind, int64_t Offset) const;
uint64_t getPC16DBLEncoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC16DBL, 2);
}
uint64_t getPC32DBLEncoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
return getPCRelEncoding(MI, OpNum, Fixups, SystemZ::FK_390_PC32DBL, 2);
}
};
-}
+} // end anonymous namespace
MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
@@ -91,8 +101,9 @@ MCCodeEmitter *llvm::createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
void SystemZMCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Bits = getBinaryCodeForInstr(MI, Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Bits = getBinaryCodeForInstr(MI, Fixups, STI);
unsigned Size = MCII.get(MI.getOpcode()).getSize();
// Big-endian insertion of Size bytes.
unsigned ShiftValue = (Size * 8) - 8;
@@ -104,7 +115,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
uint64_t SystemZMCCodeEmitter::
getMachineOpValue(const MCInst &MI, const MCOperand &MO,
- SmallVectorImpl<MCFixup> &Fixups) const {
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
if (MO.isReg())
return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
if (MO.isImm())
@@ -114,38 +126,42 @@ getMachineOpValue(const MCInst &MI, const MCOperand &MO,
uint64_t SystemZMCCodeEmitter::
getBDAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
assert(isUInt<4>(Base) && isUInt<12>(Disp));
return (Base << 12) | Disp;
}
uint64_t SystemZMCCodeEmitter::
getBDAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
assert(isUInt<4>(Base) && isInt<20>(Disp));
return (Base << 20) | ((Disp & 0xfff) << 8) | ((Disp & 0xff000) >> 12);
}
uint64_t SystemZMCCodeEmitter::
getBDXAddr12Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
- uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI);
assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<4>(Index));
return (Index << 16) | (Base << 12) | Disp;
}
uint64_t SystemZMCCodeEmitter::
getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
- uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups);
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
+ uint64_t Index = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI);
assert(isUInt<4>(Base) && isInt<20>(Disp) && isUInt<4>(Index));
return (Index << 24) | (Base << 20) | ((Disp & 0xfff) << 8)
| ((Disp & 0xff000) >> 12);
@@ -153,10 +169,11 @@ getBDXAddr20Encoding(const MCInst &MI, unsigned OpNum,
uint64_t SystemZMCCodeEmitter::
getBDLAddr12Len8Encoding(const MCInst &MI, unsigned OpNum,
- SmallVectorImpl<MCFixup> &Fixups) const {
- uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups);
- uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups);
- uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups) - 1;
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Base = getMachineOpValue(MI, MI.getOperand(OpNum), Fixups, STI);
+ uint64_t Disp = getMachineOpValue(MI, MI.getOperand(OpNum + 1), Fixups, STI);
+ uint64_t Len = getMachineOpValue(MI, MI.getOperand(OpNum + 2), Fixups, STI) - 1;
assert(isUInt<4>(Base) && isUInt<12>(Disp) && isUInt<8>(Len));
return (Len << 16) | (Base << 12) | Disp;
}
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
index 9c94ebbaac78..a3aab712c08c 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCFixups.h
@@ -14,18 +14,18 @@
namespace llvm {
namespace SystemZ {
- enum FixupKind {
- // These correspond directly to R_390_* relocations.
- FK_390_PC16DBL = FirstTargetFixupKind,
- FK_390_PC32DBL,
- FK_390_PLT16DBL,
- FK_390_PLT32DBL,
+enum FixupKind {
+ // These correspond directly to R_390_* relocations.
+ FK_390_PC16DBL = FirstTargetFixupKind,
+ FK_390_PC32DBL,
+ FK_390_PLT16DBL,
+ FK_390_PLT32DBL,
- // Marker
- LastTargetFixupKind,
- NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
- };
-}
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+} // end namespace SystemZ
} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
index 36e3d83d4d59..c6a181658896 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCObjectWriter.cpp
@@ -24,16 +24,10 @@ public:
protected:
// Override MCELFObjectTargetWriter.
- virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel, bool IsRelocWithSymbol,
- int64_t Addend) const LLVM_OVERRIDE;
- virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const LLVM_OVERRIDE;
+ unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const override;
};
-} // end anonymouse namespace
+} // end anonymous namespace
SystemZObjectWriter::SystemZObjectWriter(uint8_t OSABI)
: MCELFObjectTargetWriter(/*Is64Bit=*/true, OSABI, ELF::EM_S390,
@@ -87,12 +81,8 @@ static unsigned getPLTReloc(unsigned Kind) {
unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
const MCFixup &Fixup,
- bool IsPCRel,
- bool IsRelocWithSymbol,
- int64_t Addend) const {
- MCSymbolRefExpr::VariantKind Modifier = (Target.isAbsolute() ?
- MCSymbolRefExpr::VK_None :
- Target.getSymA()->getKind());
+ bool IsPCRel) const {
+ MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
unsigned Kind = Fixup.getKind();
switch (Modifier) {
case MCSymbolRefExpr::VK_None:
@@ -118,21 +108,6 @@ unsigned SystemZObjectWriter::GetRelocType(const MCValue &Target,
}
}
-const MCSymbol *SystemZObjectWriter::ExplicitRelSym(const MCAssembler &Asm,
- const MCValue &Target,
- const MCFragment &F,
- const MCFixup &Fixup,
- bool IsPCRel) const {
- // The addend in a PC-relative R_390_* relocation is always applied to
- // the PC-relative part of the address. If some kind of indirection
- // is applied to the symbol first, we can't use an addend there too.
- if (!Target.isAbsolute() &&
- Target.getSymA()->getKind() != MCSymbolRefExpr::VK_None &&
- IsPCRel)
- return &Target.getSymA()->getSymbol().AliasedSymbol();
- return NULL;
-}
-
MCObjectWriter *llvm::createSystemZObjectWriter(raw_ostream &OS,
uint8_t OSABI) {
MCELFObjectTargetWriter *MOTW = new SystemZObjectWriter(OSABI);
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
index 9e1296b912a0..cc94869eb5fc 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -16,6 +16,8 @@
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
#define GET_INSTRINFO_MC_DESC
#include "SystemZGenInstrInfo.inc"
@@ -25,8 +27,6 @@
#define GET_REGINFO_MC_DESC
#include "SystemZGenRegisterInfo.inc"
-using namespace llvm;
-
const unsigned SystemZMC::GR32Regs[16] = {
SystemZ::R0L, SystemZ::R1L, SystemZ::R2L, SystemZ::R3L,
SystemZ::R4L, SystemZ::R5L, SystemZ::R6L, SystemZ::R7L,
@@ -98,7 +98,8 @@ static MCAsmInfo *createSystemZMCAsmInfo(const MCRegisterInfo &MRI,
StringRef TT) {
MCAsmInfo *MAI = new SystemZMCAsmInfo(TT);
MCCFIInstruction Inst =
- MCCFIInstruction::createDefCfa(0, MRI.getDwarfRegNum(SystemZ::R15D, true),
+ MCCFIInstruction::createDefCfa(nullptr,
+ MRI.getDwarfRegNum(SystemZ::R15D, true),
SystemZMC::CFAOffsetFromInitialSP);
MAI->addInitialFrameState(Inst);
return MAI;
@@ -185,9 +186,10 @@ static MCStreamer *createSystemZMCObjectStreamer(const Target &T, StringRef TT,
MCAsmBackend &MAB,
raw_ostream &OS,
MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI,
bool RelaxAll,
bool NoExecStack) {
- return createELFStreamer(Ctx, 0, MAB, OS, Emitter, RelaxAll, NoExecStack);
+ return createELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack);
}
extern "C" void LLVMInitializeSystemZTargetMC() {
diff --git a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
index 97e325b984f6..cbaf9a83b80f 100644
--- a/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
+++ b/contrib/llvm/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.h
@@ -28,47 +28,47 @@ class raw_ostream;
extern Target TheSystemZTarget;
namespace SystemZMC {
- // How many bytes are in the ABI-defined, caller-allocated part of
- // a stack frame.
- const int64_t CallFrameSize = 160;
-
- // The offset of the DWARF CFA from the incoming stack pointer.
- const int64_t CFAOffsetFromInitialSP = CallFrameSize;
-
- // Maps of asm register numbers to LLVM register numbers, with 0 indicating
- // an invalid register. In principle we could use 32-bit and 64-bit register
- // classes directly, provided that we relegated the GPR allocation order
- // in SystemZRegisterInfo.td to an AltOrder and left the default order
- // as %r0-%r15. It seems better to provide the same interface for
- // all classes though.
- extern const unsigned GR32Regs[16];
- extern const unsigned GRH32Regs[16];
- extern const unsigned GR64Regs[16];
- extern const unsigned GR128Regs[16];
- extern const unsigned FP32Regs[16];
- extern const unsigned FP64Regs[16];
- extern const unsigned FP128Regs[16];
-
- // Return the 0-based number of the first architectural register that
- // contains the given LLVM register. E.g. R1D -> 1.
- unsigned getFirstReg(unsigned Reg);
-
- // Return the given register as a GR64.
- inline unsigned getRegAsGR64(unsigned Reg) {
- return GR64Regs[getFirstReg(Reg)];
- }
-
- // Return the given register as a low GR32.
- inline unsigned getRegAsGR32(unsigned Reg) {
- return GR32Regs[getFirstReg(Reg)];
- }
-
- // Return the given register as a high GR32.
- inline unsigned getRegAsGRH32(unsigned Reg) {
- return GRH32Regs[getFirstReg(Reg)];
- }
+// How many bytes are in the ABI-defined, caller-allocated part of
+// a stack frame.
+const int64_t CallFrameSize = 160;
+
+// The offset of the DWARF CFA from the incoming stack pointer.
+const int64_t CFAOffsetFromInitialSP = CallFrameSize;
+
+// Maps of asm register numbers to LLVM register numbers, with 0 indicating
+// an invalid register. In principle we could use 32-bit and 64-bit register
+// classes directly, provided that we relegated the GPR allocation order
+// in SystemZRegisterInfo.td to an AltOrder and left the default order
+// as %r0-%r15. It seems better to provide the same interface for
+// all classes though.
+extern const unsigned GR32Regs[16];
+extern const unsigned GRH32Regs[16];
+extern const unsigned GR64Regs[16];
+extern const unsigned GR128Regs[16];
+extern const unsigned FP32Regs[16];
+extern const unsigned FP64Regs[16];
+extern const unsigned FP128Regs[16];
+
+// Return the 0-based number of the first architectural register that
+// contains the given LLVM register. E.g. R1D -> 1.
+unsigned getFirstReg(unsigned Reg);
+
+// Return the given register as a GR64.
+inline unsigned getRegAsGR64(unsigned Reg) {
+ return GR64Regs[getFirstReg(Reg)];
}
+// Return the given register as a low GR32.
+inline unsigned getRegAsGR32(unsigned Reg) {
+ return GR32Regs[getFirstReg(Reg)];
+}
+
+// Return the given register as a high GR32.
+inline unsigned getRegAsGRH32(unsigned Reg) {
+ return GRH32Regs[getFirstReg(Reg)];
+}
+} // end namespace SystemZMC
+
MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
const MCRegisterInfo &MRI,
const MCSubtargetInfo &STI,
diff --git a/contrib/llvm/lib/Target/SystemZ/README.txt b/contrib/llvm/lib/Target/SystemZ/README.txt
index afa6cf090d07..e089047d013e 100644
--- a/contrib/llvm/lib/Target/SystemZ/README.txt
+++ b/contrib/llvm/lib/Target/SystemZ/README.txt
@@ -166,13 +166,6 @@ See CodeGen/SystemZ/alloca-01.ll for an example.
--
-Atomic loads and stores use the default compare-and-swap based implementation.
-This is much too conservative in practice, since the architecture guarantees
-that 1-, 2-, 4- and 8-byte loads and stores to aligned addresses are
-inherently atomic.
-
---
-
If needed, we can support 16-byte atomics using LPQ, STPQ and CSDG.
--
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.h b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
index dcebbad59071..15792494930a 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZ.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.h
@@ -19,97 +19,98 @@
#include "llvm/Support/CodeGen.h"
namespace llvm {
- class SystemZTargetMachine;
- class FunctionPass;
-
- namespace SystemZ {
- // Condition-code mask values.
- const unsigned CCMASK_0 = 1 << 3;
- const unsigned CCMASK_1 = 1 << 2;
- const unsigned CCMASK_2 = 1 << 1;
- const unsigned CCMASK_3 = 1 << 0;
- const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
-
- // Condition-code mask assignments for integer and floating-point
- // comparisons.
- const unsigned CCMASK_CMP_EQ = CCMASK_0;
- const unsigned CCMASK_CMP_LT = CCMASK_1;
- const unsigned CCMASK_CMP_GT = CCMASK_2;
- const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT;
- const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT;
- const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT;
-
- // Condition-code mask assignments for floating-point comparisons only.
- const unsigned CCMASK_CMP_UO = CCMASK_3;
- const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO;
-
- // All condition-code values produced by comparisons.
- const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
- const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
-
- // Condition-code mask assignments for CS.
- const unsigned CCMASK_CS_EQ = CCMASK_0;
- const unsigned CCMASK_CS_NE = CCMASK_1;
- const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1;
-
- // Condition-code mask assignments for a completed SRST loop.
- const unsigned CCMASK_SRST_FOUND = CCMASK_1;
- const unsigned CCMASK_SRST_NOTFOUND = CCMASK_2;
- const unsigned CCMASK_SRST = CCMASK_1 | CCMASK_2;
-
- // Condition-code mask assignments for TEST UNDER MASK.
- const unsigned CCMASK_TM_ALL_0 = CCMASK_0;
- const unsigned CCMASK_TM_MIXED_MSB_0 = CCMASK_1;
- const unsigned CCMASK_TM_MIXED_MSB_1 = CCMASK_2;
- const unsigned CCMASK_TM_ALL_1 = CCMASK_3;
- const unsigned CCMASK_TM_SOME_0 = CCMASK_TM_ALL_1 ^ CCMASK_ANY;
- const unsigned CCMASK_TM_SOME_1 = CCMASK_TM_ALL_0 ^ CCMASK_ANY;
- const unsigned CCMASK_TM_MSB_0 = CCMASK_0 | CCMASK_1;
- const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
- const unsigned CCMASK_TM = CCMASK_ANY;
-
- // The position of the low CC bit in an IPM result.
- const unsigned IPM_CC = 28;
-
- // Mask assignments for PFD.
- const unsigned PFD_READ = 1;
- const unsigned PFD_WRITE = 2;
-
- // Return true if Val fits an LLILL operand.
- static inline bool isImmLL(uint64_t Val) {
- return (Val & ~0x000000000000ffffULL) == 0;
- }
-
- // Return true if Val fits an LLILH operand.
- static inline bool isImmLH(uint64_t Val) {
- return (Val & ~0x00000000ffff0000ULL) == 0;
- }
-
- // Return true if Val fits an LLIHL operand.
- static inline bool isImmHL(uint64_t Val) {
- return (Val & ~0x00000ffff00000000ULL) == 0;
- }
-
- // Return true if Val fits an LLIHH operand.
- static inline bool isImmHH(uint64_t Val) {
- return (Val & ~0xffff000000000000ULL) == 0;
- }
-
- // Return true if Val fits an LLILF operand.
- static inline bool isImmLF(uint64_t Val) {
- return (Val & ~0x00000000ffffffffULL) == 0;
- }
-
- // Return true if Val fits an LLIHF operand.
- static inline bool isImmHF(uint64_t Val) {
- return (Val & ~0xffffffff00000000ULL) == 0;
- }
- }
-
- FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
- CodeGenOpt::Level OptLevel);
- FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
- FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
- FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
-} // end namespace llvm;
+class SystemZTargetMachine;
+class FunctionPass;
+
+namespace SystemZ {
+// Condition-code mask values.
+const unsigned CCMASK_0 = 1 << 3;
+const unsigned CCMASK_1 = 1 << 2;
+const unsigned CCMASK_2 = 1 << 1;
+const unsigned CCMASK_3 = 1 << 0;
+const unsigned CCMASK_ANY = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+// Condition-code mask assignments for integer and floating-point
+// comparisons.
+const unsigned CCMASK_CMP_EQ = CCMASK_0;
+const unsigned CCMASK_CMP_LT = CCMASK_1;
+const unsigned CCMASK_CMP_GT = CCMASK_2;
+const unsigned CCMASK_CMP_NE = CCMASK_CMP_LT | CCMASK_CMP_GT;
+const unsigned CCMASK_CMP_LE = CCMASK_CMP_EQ | CCMASK_CMP_LT;
+const unsigned CCMASK_CMP_GE = CCMASK_CMP_EQ | CCMASK_CMP_GT;
+
+// Condition-code mask assignments for floating-point comparisons only.
+const unsigned CCMASK_CMP_UO = CCMASK_3;
+const unsigned CCMASK_CMP_O = CCMASK_ANY ^ CCMASK_CMP_UO;
+
+// All condition-code values produced by comparisons.
+const unsigned CCMASK_ICMP = CCMASK_0 | CCMASK_1 | CCMASK_2;
+const unsigned CCMASK_FCMP = CCMASK_0 | CCMASK_1 | CCMASK_2 | CCMASK_3;
+
+// Condition-code mask assignments for CS.
+const unsigned CCMASK_CS_EQ = CCMASK_0;
+const unsigned CCMASK_CS_NE = CCMASK_1;
+const unsigned CCMASK_CS = CCMASK_0 | CCMASK_1;
+
+// Condition-code mask assignments for a completed SRST loop.
+const unsigned CCMASK_SRST_FOUND = CCMASK_1;
+const unsigned CCMASK_SRST_NOTFOUND = CCMASK_2;
+const unsigned CCMASK_SRST = CCMASK_1 | CCMASK_2;
+
+// Condition-code mask assignments for TEST UNDER MASK.
+const unsigned CCMASK_TM_ALL_0 = CCMASK_0;
+const unsigned CCMASK_TM_MIXED_MSB_0 = CCMASK_1;
+const unsigned CCMASK_TM_MIXED_MSB_1 = CCMASK_2;
+const unsigned CCMASK_TM_ALL_1 = CCMASK_3;
+const unsigned CCMASK_TM_SOME_0 = CCMASK_TM_ALL_1 ^ CCMASK_ANY;
+const unsigned CCMASK_TM_SOME_1 = CCMASK_TM_ALL_0 ^ CCMASK_ANY;
+const unsigned CCMASK_TM_MSB_0 = CCMASK_0 | CCMASK_1;
+const unsigned CCMASK_TM_MSB_1 = CCMASK_2 | CCMASK_3;
+const unsigned CCMASK_TM = CCMASK_ANY;
+
+// The position of the low CC bit in an IPM result.
+const unsigned IPM_CC = 28;
+
+// Mask assignments for PFD.
+const unsigned PFD_READ = 1;
+const unsigned PFD_WRITE = 2;
+
+// Return true if Val fits an LLILL operand.
+static inline bool isImmLL(uint64_t Val) {
+ return (Val & ~0x000000000000ffffULL) == 0;
+}
+
+// Return true if Val fits an LLILH operand.
+static inline bool isImmLH(uint64_t Val) {
+ return (Val & ~0x00000000ffff0000ULL) == 0;
+}
+
+// Return true if Val fits an LLIHL operand.
+static inline bool isImmHL(uint64_t Val) {
+ return (Val & ~0x00000ffff00000000ULL) == 0;
+}
+
+// Return true if Val fits an LLIHH operand.
+static inline bool isImmHH(uint64_t Val) {
+ return (Val & ~0xffff000000000000ULL) == 0;
+}
+
+// Return true if Val fits an LLILF operand.
+static inline bool isImmLF(uint64_t Val) {
+ return (Val & ~0x00000000ffffffffULL) == 0;
+}
+
+// Return true if Val fits an LLIHF operand.
+static inline bool isImmHF(uint64_t Val) {
+ return (Val & ~0xffffffff00000000ULL) == 0;
+}
+} // end namespace SystemZ
+
+FunctionPass *createSystemZISelDag(SystemZTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+FunctionPass *createSystemZElimComparePass(SystemZTargetMachine &TM);
+FunctionPass *createSystemZShortenInstPass(SystemZTargetMachine &TM);
+FunctionPass *createSystemZLongBranchPass(SystemZTargetMachine &TM);
+} // end namespace llvm
+
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZ.td b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
index abf5c8eb320c..5f829034902f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZ.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZ.td
@@ -53,20 +53,10 @@ def SystemZAsmParser : AsmParser {
}
//===----------------------------------------------------------------------===//
-// Assembly writer
-//===----------------------------------------------------------------------===//
-
-def SystemZAsmWriter : AsmWriter {
- string AsmWriterClassName = "InstPrinter";
- bit isMCAsmWriter = 1;
-}
-
-//===----------------------------------------------------------------------===//
// Top-level target declaration
//===----------------------------------------------------------------------===//
def SystemZ : Target {
let InstructionSet = SystemZInstrInfo;
let AssemblyParsers = [SystemZAsmParser];
- let AssemblyWriters = [SystemZAsmWriter];
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index 75cbda4958a2..8b18bc16e1c6 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -18,11 +18,11 @@
#include "SystemZMCInstLower.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
@@ -151,11 +151,20 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
#undef LOWER_HIGH
+ case SystemZ::Serialize:
+ if (Subtarget->hasFastSerialization())
+ LoweredMI = MCInstBuilder(SystemZ::AsmBCR)
+ .addImm(14).addReg(SystemZ::R0D);
+ else
+ LoweredMI = MCInstBuilder(SystemZ::AsmBCR)
+ .addImm(15).addReg(SystemZ::R0D);
+ break;
+
default:
Lower.lower(MI, LoweredMI);
break;
}
- OutStreamer.EmitInstruction(LoweredMI);
+ EmitToStreamer(OutStreamer, LoweredMI);
}
// Convert a SystemZ-specific constant pool modifier into the associated
@@ -170,8 +179,7 @@ getModifierVariantKind(SystemZCP::SystemZCPModifier Modifier) {
void SystemZAsmPrinter::
EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
- SystemZConstantPoolValue *ZCPV =
- static_cast<SystemZConstantPoolValue*>(MCPV);
+ auto *ZCPV = static_cast<SystemZConstantPoolValue*>(MCPV);
const MCExpr *Expr =
MCSymbolRefExpr::Create(getSymbol(ZCPV->getGlobalValue()),
@@ -212,7 +220,7 @@ bool SystemZAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
void SystemZAsmPrinter::EmitEndOfAsmFile(Module &M) {
if (Subtarget->isTargetELF()) {
- const TargetLoweringObjectFileELF &TLOFELF =
+ auto &TLOFELF =
static_cast<const TargetLoweringObjectFileELF &>(getObjFileLowering());
MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
index 4b6c51b6f0b3..20093bc614d8 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZAsmPrinter.h
@@ -32,20 +32,18 @@ public:
}
// Override AsmPrinter.
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "SystemZ Assembly Printer";
}
- virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE;
- virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV)
- LLVM_OVERRIDE;
- virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant, const char *ExtraCode,
- raw_ostream &OS) LLVM_OVERRIDE;
- virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
- unsigned AsmVariant,
- const char *ExtraCode,
- raw_ostream &OS) LLVM_OVERRIDE;
- virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE;
+ void EmitInstruction(const MachineInstr *MI) override;
+ void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) override;
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+ void EmitEndOfAsmFile(Module &M) override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
index 298985e7e514..4b1569d2bd72 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.h
@@ -11,13 +11,13 @@
#define SYSTEMZCALLINGCONV_H
namespace llvm {
- namespace SystemZ {
- const unsigned NumArgGPRs = 5;
- extern const unsigned ArgGPRs[NumArgGPRs];
+namespace SystemZ {
+ const unsigned NumArgGPRs = 5;
+ extern const unsigned ArgGPRs[NumArgGPRs];
- const unsigned NumArgFPRs = 4;
- extern const unsigned ArgFPRs[NumArgFPRs];
- }
-}
+ const unsigned NumArgFPRs = 4;
+ extern const unsigned ArgFPRs[NumArgFPRs];
+} // end namespace SystemZ
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
index c4f641e7bdec..fb0d1d8a3fe7 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZCallingConv.td
@@ -13,7 +13,7 @@ class CCIfExtend<CCAction A>
: CCIf<"ArgFlags.isSExt() || ArgFlags.isZExt()", A>;
//===----------------------------------------------------------------------===//
-// SVR4 return value calling convention
+// z/Linux return value calling convention
//===----------------------------------------------------------------------===//
def RetCC_SystemZ : CallingConv<[
// Promote i32 to i64 if it has an explicit extension type.
@@ -39,7 +39,7 @@ def RetCC_SystemZ : CallingConv<[
]>;
//===----------------------------------------------------------------------===//
-// SVR4 argument calling conventions
+// z/Linux argument calling conventions
//===----------------------------------------------------------------------===//
def CC_SystemZ : CallingConv<[
// Promote i32 to i64 if it has an explicit extension type.
@@ -63,3 +63,9 @@ def CC_SystemZ : CallingConv<[
// Other arguments are passed in 8-byte-aligned 8-byte stack slots.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;
+
+//===----------------------------------------------------------------------===//
+// z/Linux callee-saved registers
+//===----------------------------------------------------------------------===//
+def CSR_SystemZ : CalleeSavedRegs<(add (sequence "R%dD", 6, 15),
+ (sequence "F%dD", 8, 15))>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
index 6c7081169a96..19cec219e2d1 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.cpp
@@ -43,7 +43,7 @@ getExistingMachineCPValue(MachineConstantPool *CP, unsigned Alignment) {
for (unsigned I = 0, E = Constants.size(); I != E; ++I) {
if (Constants[I].isMachineConstantPoolEntry() &&
(Constants[I].getAlignment() & AlignMask) == 0) {
- SystemZConstantPoolValue *ZCPV =
+ auto *ZCPV =
static_cast<SystemZConstantPoolValue *>(Constants[I].Val.MachineCPVal);
if (ZCPV->GV == GV && ZCPV->Modifier == Modifier)
return I;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
index 9927bdb262c4..699718f5c80e 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZConstantPoolValue.h
@@ -18,10 +18,10 @@ namespace llvm {
class GlobalValue;
namespace SystemZCP {
- enum SystemZCPModifier {
- NTPOFF
- };
-}
+enum SystemZCPModifier {
+ NTPOFF
+};
+} // end namespace SystemZCP
/// A SystemZ-specific constant pool value. At present, the only
/// defined constant pool values are offsets of thread-local variables
@@ -39,17 +39,17 @@ public:
Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier);
// Override MachineConstantPoolValue.
- virtual unsigned getRelocationInfo() const LLVM_OVERRIDE;
- virtual int getExistingMachineCPValue(MachineConstantPool *CP,
- unsigned Alignment) LLVM_OVERRIDE;
- virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) LLVM_OVERRIDE;
- virtual void print(raw_ostream &O) const LLVM_OVERRIDE;
+ unsigned getRelocationInfo() const override;
+ int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) override;
+ void addSelectionDAGCSEId(FoldingSetNodeID &ID) override;
+ void print(raw_ostream &O) const override;
// Access SystemZ-specific fields.
const GlobalValue *getGlobalValue() const { return GV; }
SystemZCP::SystemZCPModifier getModifier() const { return Modifier; }
};
-} // End llvm namespace
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
index b8a77db0f845..dc210d608631 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -13,8 +13,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-elim-compare"
-
#include "SystemZTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -28,78 +26,79 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-elim-compare"
+
STATISTIC(BranchOnCounts, "Number of branch-on-count instructions");
STATISTIC(EliminatedComparisons, "Number of eliminated comparisons");
STATISTIC(FusedComparisons, "Number of fused compare-and-branch instructions");
namespace {
- // Represents the references to a particular register in one or more
- // instructions.
- struct Reference {
- Reference()
- : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {}
-
- Reference &operator|=(const Reference &Other) {
- Def |= Other.Def;
- IndirectDef |= Other.IndirectDef;
- Use |= Other.Use;
- IndirectUse |= Other.IndirectUse;
- return *this;
- }
+// Represents the references to a particular register in one or more
+// instructions.
+struct Reference {
+ Reference()
+ : Def(false), Use(false), IndirectDef(false), IndirectUse(false) {}
+
+ Reference &operator|=(const Reference &Other) {
+ Def |= Other.Def;
+ IndirectDef |= Other.IndirectDef;
+ Use |= Other.Use;
+ IndirectUse |= Other.IndirectUse;
+ return *this;
+ }
- operator bool() const { return Def || Use; }
+ operator bool() const { return Def || Use; }
- // True if the register is defined or used in some form, either directly or
- // via a sub- or super-register.
- bool Def;
- bool Use;
+ // True if the register is defined or used in some form, either directly or
+ // via a sub- or super-register.
+ bool Def;
+ bool Use;
- // True if the register is defined or used indirectly, by a sub- or
- // super-register.
- bool IndirectDef;
- bool IndirectUse;
- };
+ // True if the register is defined or used indirectly, by a sub- or
+ // super-register.
+ bool IndirectDef;
+ bool IndirectUse;
+};
- class SystemZElimCompare : public MachineFunctionPass {
- public:
- static char ID;
- SystemZElimCompare(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(0), TRI(0) {}
+class SystemZElimCompare : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZElimCompare(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr) {}
- virtual const char *getPassName() const {
- return "SystemZ Comparison Elimination";
- }
+ const char *getPassName() const override {
+ return "SystemZ Comparison Elimination";
+ }
+
+ bool processBlock(MachineBasicBlock &MBB);
+ bool runOnMachineFunction(MachineFunction &F) override;
- bool processBlock(MachineBasicBlock *MBB);
- bool runOnMachineFunction(MachineFunction &F);
-
- private:
- Reference getRegReferences(MachineInstr *MI, unsigned Reg);
- bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
- bool convertToLoadAndTest(MachineInstr *MI);
- bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
- bool optimizeCompareZero(MachineInstr *Compare,
+private:
+ Reference getRegReferences(MachineInstr *MI, unsigned Reg);
+ bool convertToBRCT(MachineInstr *MI, MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool convertToLoadAndTest(MachineInstr *MI);
+ bool adjustCCMasksForInstr(MachineInstr *MI, MachineInstr *Compare,
SmallVectorImpl<MachineInstr *> &CCUsers);
- bool fuseCompareAndBranch(MachineInstr *Compare,
- SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool optimizeCompareZero(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
+ bool fuseCompareAndBranch(MachineInstr *Compare,
+ SmallVectorImpl<MachineInstr *> &CCUsers);
- const SystemZInstrInfo *TII;
- const TargetRegisterInfo *TRI;
- };
+ const SystemZInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+};
- char SystemZElimCompare::ID = 0;
-} // end of anonymous namespace
+char SystemZElimCompare::ID = 0;
+} // end anonymous namespace
FunctionPass *llvm::createSystemZElimComparePass(SystemZTargetMachine &TM) {
return new SystemZElimCompare(TM);
}
// Return true if CC is live out of MBB.
-static bool isCCLiveOut(MachineBasicBlock *MBB) {
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI)
+static bool isCCLiveOut(MachineBasicBlock &MBB) {
+ for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI)
if ((*SI)->isLiveIn(SystemZ::CC))
return true;
return false;
@@ -328,8 +327,8 @@ optimizeCompareZero(MachineInstr *Compare,
// Search back for CC results that are based on the first operand.
unsigned SrcReg = Compare->getOperand(0).getReg();
unsigned SrcSubReg = Compare->getOperand(0).getSubReg();
- MachineBasicBlock *MBB = Compare->getParent();
- MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB->begin();
+ MachineBasicBlock &MBB = *Compare->getParent();
+ MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB.begin();
Reference CCRefs;
Reference SrcRefs;
while (MBBI != MBBE) {
@@ -424,7 +423,7 @@ fuseCompareAndBranch(MachineInstr *Compare,
// Process all comparison instructions in MBB. Return true if something
// changed.
-bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) {
+bool SystemZElimCompare::processBlock(MachineBasicBlock &MBB) {
bool Changed = false;
// Walk backwards through the block looking for comparisons, recording
@@ -432,8 +431,8 @@ bool SystemZElimCompare::processBlock(MachineBasicBlock *MBB) {
// instructions before it.
bool CompleteCCUsers = !isCCLiveOut(MBB);
SmallVector<MachineInstr *, 4> CCUsers;
- MachineBasicBlock::iterator MBBI = MBB->end();
- while (MBBI != MBB->begin()) {
+ MachineBasicBlock::iterator MBBI = MBB.end();
+ while (MBBI != MBB.begin()) {
MachineInstr *MI = --MBBI;
if (CompleteCCUsers &&
MI->isCompare() &&
@@ -463,9 +462,8 @@ bool SystemZElimCompare::runOnMachineFunction(MachineFunction &F) {
TRI = &TII->getRegisterInfo();
bool Changed = false;
- for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
- MFI != MFE; ++MFI)
- Changed |= processBlock(MFI);
+ for (auto &MBB : F)
+ Changed |= processBlock(MBB);
return Changed;
}
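
Beyond the pointer-to-reference and brace-style cleanups, the pass itself is about letting an earlier instruction's condition-code result stand in for an explicit compare: compares against zero can be folded into load-and-test or into the arithmetic that produced the value, compare-plus-branch pairs can be fused, and decrement-and-branch loops can use branch-on-count. An illustrative source-level example of the code shapes it targets; the SystemZ mnemonics in the comments indicate the intended outcome rather than output taken from this patch:

    // Illustrative only; exact instruction selection will vary.
    long sum(const long *p, long n) {
      long s = 0;
      while (n-- > 0)        // decrement-and-test loops are candidates for
        s += *p++;           // BRANCH RELATIVE ON COUNT (BRCTG)
      return s;
    }

    int is_negative(const int *p) {
      return *p < 0;         // the load and the compare with zero can become
    }                        // a single LOAD AND TEST (LT)
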
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
index acfb491b953b..055dbe914995 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -10,8 +10,9 @@
#include "SystemZFrameLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZInstrBuilder.h"
+#include "SystemZInstrInfo.h"
#include "SystemZMachineFunctionInfo.h"
-#include "SystemZTargetMachine.h"
+#include "SystemZRegisterInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
@@ -20,35 +21,33 @@
using namespace llvm;
namespace {
- // The ABI-defined register save slots, relative to the incoming stack
- // pointer.
- static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
- { SystemZ::R2D, 0x10 },
- { SystemZ::R3D, 0x18 },
- { SystemZ::R4D, 0x20 },
- { SystemZ::R5D, 0x28 },
- { SystemZ::R6D, 0x30 },
- { SystemZ::R7D, 0x38 },
- { SystemZ::R8D, 0x40 },
- { SystemZ::R9D, 0x48 },
- { SystemZ::R10D, 0x50 },
- { SystemZ::R11D, 0x58 },
- { SystemZ::R12D, 0x60 },
- { SystemZ::R13D, 0x68 },
- { SystemZ::R14D, 0x70 },
- { SystemZ::R15D, 0x78 },
- { SystemZ::F0D, 0x80 },
- { SystemZ::F2D, 0x88 },
- { SystemZ::F4D, 0x90 },
- { SystemZ::F6D, 0x98 }
- };
-}
-
-SystemZFrameLowering::SystemZFrameLowering(const SystemZTargetMachine &tm,
- const SystemZSubtarget &sti)
- : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
- -SystemZMC::CallFrameSize, 8),
- TM(tm), STI(sti) {
+// The ABI-defined register save slots, relative to the incoming stack
+// pointer.
+static const TargetFrameLowering::SpillSlot SpillOffsetTable[] = {
+ { SystemZ::R2D, 0x10 },
+ { SystemZ::R3D, 0x18 },
+ { SystemZ::R4D, 0x20 },
+ { SystemZ::R5D, 0x28 },
+ { SystemZ::R6D, 0x30 },
+ { SystemZ::R7D, 0x38 },
+ { SystemZ::R8D, 0x40 },
+ { SystemZ::R9D, 0x48 },
+ { SystemZ::R10D, 0x50 },
+ { SystemZ::R11D, 0x58 },
+ { SystemZ::R12D, 0x60 },
+ { SystemZ::R13D, 0x68 },
+ { SystemZ::R14D, 0x70 },
+ { SystemZ::R15D, 0x78 },
+ { SystemZ::F0D, 0x80 },
+ { SystemZ::F2D, 0x88 },
+ { SystemZ::F4D, 0x90 },
+ { SystemZ::F6D, 0x98 }
+};
+} // end anonymous namespace
+
+SystemZFrameLowering::SystemZFrameLowering()
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown, 8,
+ -SystemZMC::CallFrameSize, 8) {
// Create a mapping from register number to save slot offset.
RegSpillOffsets.grow(SystemZ::NUM_TARGET_REGS);
for (unsigned I = 0, E = array_lengthof(SpillOffsetTable); I != E; ++I)
@@ -93,7 +92,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// save and restore the stack pointer at the same time, via STMG and LMG.
// This allows the deallocation to be done by the LMG, rather than needing
// a separate %r15 addition.
- const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF);
+ const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);
for (unsigned I = 0; CSRegs[I]; ++I) {
unsigned Reg = CSRegs[I];
if (SystemZ::GR64BitRegClass.contains(Reg) && MRI.isPhysRegUsed(Reg)) {
@@ -108,9 +107,8 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
// instruction, or an implicit one that comes between the explicit start
// and end registers.
static void addSavedGPR(MachineBasicBlock &MBB, MachineInstrBuilder &MIB,
- const SystemZTargetMachine &TM,
unsigned GPR64, bool IsImplicit) {
- const SystemZRegisterInfo *RI = TM.getRegisterInfo();
+ const TargetRegisterInfo *RI = MBB.getParent()->getTarget().getRegisterInfo();
unsigned GPR32 = RI->getSubReg(GPR64, SystemZ::subreg_l32);
bool IsLive = MBB.isLiveIn(GPR64) || MBB.isLiveIn(GPR32);
if (!IsLive || !IsImplicit) {
@@ -176,8 +174,8 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(SystemZ::STMG));
// Add the explicit register operands.
- addSavedGPR(MBB, MIB, TM, LowGPR, false);
- addSavedGPR(MBB, MIB, TM, HighGPR, false);
+ addSavedGPR(MBB, MIB, LowGPR, false);
+ addSavedGPR(MBB, MIB, HighGPR, false);
// Add the address.
MIB.addReg(SystemZ::R15D).addImm(StartOffset);
@@ -187,13 +185,13 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB,
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
unsigned Reg = CSI[I].getReg();
if (SystemZ::GR64BitRegClass.contains(Reg))
- addSavedGPR(MBB, MIB, TM, Reg, true);
+ addSavedGPR(MBB, MIB, Reg, true);
}
// ...likewise GPR varargs.
if (IsVarArg)
for (unsigned I = ZFI->getVarArgsFirstGPR(); I < SystemZ::NumArgGPRs; ++I)
- addSavedGPR(MBB, MIB, TM, SystemZ::ArgGPRs[I], true);
+ addSavedGPR(MBB, MIB, SystemZ::ArgGPRs[I], true);
}
// Save FPRs in the normal TargetInstrInfo way.
@@ -312,7 +310,7 @@ static void emitIncrement(MachineBasicBlock &MBB,
void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front();
MachineFrameInfo *MFFrame = MF.getFrameInfo();
- const SystemZInstrInfo *ZII =
+ auto *ZII =
static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.begin();
@@ -333,16 +331,14 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
llvm_unreachable("Couldn't skip over GPR saves");
// Add CFI for the GPR saves.
- MCSymbol *GPRSaveLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL,
- ZII->get(TargetOpcode::PROLOG_LABEL)).addSym(GPRSaveLabel);
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- unsigned Reg = I->getReg();
+ for (auto &Save : CSI) {
+ unsigned Reg = Save.getReg();
if (SystemZ::GR64BitRegClass.contains(Reg)) {
int64_t Offset = SPOffsetFromCFA + RegSpillOffsets[Reg];
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- GPRSaveLabel, MRI->getDwarfRegNum(Reg, true), Offset));
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
}
}
}
@@ -354,11 +350,10 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
emitIncrement(MBB, MBBI, DL, SystemZ::R15D, Delta, ZII);
// Add CFI for the allocation.
- MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
- .addSym(AdjustSPLabel);
- MMI.addFrameInst(MCCFIInstruction::createDefCfaOffset(
- AdjustSPLabel, SPOffsetFromCFA + Delta));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, SPOffsetFromCFA + Delta));
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
SPOffsetFromCFA += Delta;
}
@@ -368,26 +363,23 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
.addReg(SystemZ::R15D);
// Add CFI for the new frame location.
- MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
- BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
- .addSym(SetFPLabel);
unsigned HardFP = MRI->getDwarfRegNum(SystemZ::R11D, true);
- MMI.addFrameInst(
- MCCFIInstruction::createDefCfaRegister(SetFPLabel, HardFP));
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, HardFP));
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
// Mark the FramePtr as live at the beginning of every block except
// the entry block. (We'll have marked R11 as live on entry when
// saving the GPRs.)
- for (MachineFunction::iterator
- I = llvm::next(MF.begin()), E = MF.end(); I != E; ++I)
+ for (auto I = std::next(MF.begin()), E = MF.end(); I != E; ++I)
I->addLiveIn(SystemZ::R11D);
}
// Skip over the FPR saves.
- MCSymbol *FPRSaveLabel = 0;
- for (std::vector<CalleeSavedInfo>::const_iterator
- I = CSI.begin(), E = CSI.end(); I != E; ++I) {
- unsigned Reg = I->getReg();
+ SmallVector<unsigned, 8> CFIIndexes;
+ for (auto &Save : CSI) {
+ unsigned Reg = Save.getReg();
if (SystemZ::FP64BitRegClass.contains(Reg)) {
if (MBBI != MBB.end() &&
(MBBI->getOpcode() == SystemZ::STD ||
@@ -397,25 +389,25 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
llvm_unreachable("Couldn't skip over FPR save");
// Add CFI for the this save.
- if (!FPRSaveLabel)
- FPRSaveLabel = MMI.getContext().CreateTempSymbol();
- unsigned Reg = MRI->getDwarfRegNum(I->getReg(), true);
- int64_t Offset = getFrameIndexOffset(MF, I->getFrameIdx());
- MMI.addFrameInst(MCCFIInstruction::createOffset(
- FPRSaveLabel, Reg, SPOffsetFromCFA + Offset));
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ int64_t Offset = getFrameIndexOffset(MF, Save.getFrameIdx());
+ unsigned CFIIndex = MMI.addFrameInst(MCCFIInstruction::createOffset(
+ nullptr, DwarfReg, SPOffsetFromCFA + Offset));
+ CFIIndexes.push_back(CFIIndex);
}
}
// Complete the CFI for the FPR saves, modelling them as taking effect
// after the last save.
- if (FPRSaveLabel)
- BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::PROLOG_LABEL))
- .addSym(FPRSaveLabel);
+ for (auto CFIIndex : CFIIndexes) {
+ BuildMI(MBB, MBBI, DL, ZII->get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
}
void SystemZFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
- const SystemZInstrInfo *ZII =
+ auto *ZII =
static_cast<const SystemZInstrInfo*>(MF.getTarget().getInstrInfo());
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
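
Prologue CFI is now recorded by registering each MCCFIInstruction with MachineModuleInfo and planting a CFI_INSTRUCTION pseudo that refers to it by index, instead of emitting PROLOG_LABEL symbols and attaching the directives to them. A small sketch that factors the repeated pattern from emitPrologue into a helper; the helper itself is hypothetical, but the two calls it wraps are exactly the ones used above:

    // Hypothetical helper wrapping the addFrameInst + CFI_INSTRUCTION pattern.
    static void emitCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                        DebugLoc DL, const TargetInstrInfo *TII,
                        MachineModuleInfo &MMI, const MCCFIInstruction &Inst) {
      unsigned CFIIndex = MMI.addFrameInst(Inst);
      BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // For example, to record that the CFA register is now %r11:
    //   emitCFI(MBB, MBBI, DL, ZII, MMI,
    //           MCCFIInstruction::createDefCfaRegister(nullptr, HardFP));
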
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
index 9b0a1d5f224c..4d5fe6dce62d 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZFrameLowering.h
@@ -10,7 +10,6 @@
#ifndef SYSTEMZFRAMELOWERING_H
#define SYSTEMZFRAMELOWERING_H
-#include "SystemZSubtarget.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/Target/TargetFrameLowering.h"
@@ -21,48 +20,35 @@ class SystemZSubtarget;
class SystemZFrameLowering : public TargetFrameLowering {
IndexedMap<unsigned> RegSpillOffsets;
-protected:
- const SystemZTargetMachine &TM;
- const SystemZSubtarget &STI;
-
public:
- SystemZFrameLowering(const SystemZTargetMachine &tm,
- const SystemZSubtarget &sti);
+ SystemZFrameLowering();
// Override TargetFrameLowering.
- virtual bool isFPCloseToIncomingSP() const LLVM_OVERRIDE { return false; }
- virtual const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const
- LLVM_OVERRIDE;
- virtual void
- processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
- RegScavenger *RS) const LLVM_OVERRIDE;
- virtual bool
- spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const
- LLVM_OVERRIDE;
- virtual bool
- restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBII,
- const std::vector<CalleeSavedInfo> &CSI,
- const TargetRegisterInfo *TRI) const
- LLVM_OVERRIDE;
- virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
- RegScavenger *RS) const;
- virtual void emitPrologue(MachineFunction &MF) const LLVM_OVERRIDE;
- virtual void emitEpilogue(MachineFunction &MF,
- MachineBasicBlock &MBB) const LLVM_OVERRIDE;
- virtual bool hasFP(const MachineFunction &MF) const LLVM_OVERRIDE;
- virtual int getFrameIndexOffset(const MachineFunction &MF,
- int FI) const LLVM_OVERRIDE;
- virtual bool hasReservedCallFrame(const MachineFunction &MF) const
- LLVM_OVERRIDE;
- virtual void
- eliminateCallFramePseudoInstr(MachineFunction &MF,
- MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI) const
- LLVM_OVERRIDE;
+ bool isFPCloseToIncomingSP() const override { return false; }
+ const SpillSlot *getCalleeSavedSpillSlots(unsigned &NumEntries) const
+ override;
+ void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const override;
+ bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const override;
+ bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBII,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const
+ override;
+ void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS) const override;
+ void emitPrologue(MachineFunction &MF) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ bool hasFP(const MachineFunction &MF) const override;
+ int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ void eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const
+ override;
// Return the number of bytes in the callee-allocated part of the frame.
uint64_t getAllocatedStackSize(const MachineFunction &MF) const;
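
Swapping LLVM_OVERRIDE for the C++11 override keyword is more than cosmetic: if a base-class virtual later changes its signature, a marked override stops compiling instead of silently becoming an unrelated virtual function. A self-contained example of the failure mode it guards against:

    // Standalone example: 'override' turns signature drift into a compile error.
    struct Base {
      virtual bool hasFP(int FrameSize) const { return false; }
      virtual ~Base() {}
    };

    struct Derived : Base {
      // If Base::hasFP ever changes its parameter list, this declaration
      // fails to compile rather than quietly declaring a new virtual.
      bool hasFP(int FrameSize) const override { return true; }
    };

    int main() { Derived D; return D.hasFP(0) ? 0 : 1; }
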
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index f4a27733ce0e..24f7584ae9c6 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -19,6 +19,8 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-isel"
+
namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
@@ -72,14 +74,14 @@ struct SystemZAddressingMode {
errs() << "SystemZAddressingMode " << this << '\n';
errs() << " Base ";
- if (Base.getNode() != 0)
+ if (Base.getNode())
Base.getNode()->dump();
else
errs() << "null\n";
if (hasIndexField()) {
errs() << " Index ";
- if (Index.getNode() != 0)
+ if (Index.getNode())
Index.getNode()->dump();
else
errs() << "null\n";
@@ -318,16 +320,14 @@ public:
Subtarget(*TM.getSubtargetImpl()) { }
// Override MachineFunctionPass.
- virtual const char *getPassName() const LLVM_OVERRIDE {
+ const char *getPassName() const override {
return "SystemZ DAG->DAG Pattern Instruction Selection";
}
// Override SelectionDAGISel.
- virtual SDNode *Select(SDNode *Node) LLVM_OVERRIDE;
- virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
- char ConstraintCode,
- std::vector<SDValue> &OutOps)
- LLVM_OVERRIDE;
+ SDNode *Select(SDNode *Node) override;
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
+ std::vector<SDValue> &OutOps) override;
// Include the pieces autogenerated from the target description.
#include "SystemZGenDAGISel.inc"
@@ -651,8 +651,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
return false;
// We need a constant mask.
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
if (!MaskNode)
return false;
@@ -666,7 +665,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
if (Used != (AndMask | InsertMask)) {
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne);
if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
return false;
}
@@ -704,8 +703,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
if (RxSBG.Opcode == SystemZ::RNSBG)
return false;
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!MaskNode)
return false;
@@ -716,7 +714,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask |= KnownZero.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
@@ -729,8 +727,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
if (RxSBG.Opcode != SystemZ::RNSBG)
return false;
- ConstantSDNode *MaskNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!MaskNode)
return false;
@@ -741,7 +738,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask &= ~KnownOne.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
@@ -754,8 +751,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// Any 64-bit rotate left can be merged into the RxSBG.
if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
return false;
- ConstantSDNode *CountNode
- = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
@@ -764,9 +760,24 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
return true;
}
- case ISD::SIGN_EXTEND:
+ case ISD::ANY_EXTEND:
+ // Bits above the extended operand are don't-care.
+ RxSBG.Input = N.getOperand(0);
+ return true;
+
case ISD::ZERO_EXTEND:
- case ISD::ANY_EXTEND: {
+ if (RxSBG.Opcode != SystemZ::RNSBG) {
+ // Restrict the mask to the extended operand.
+ unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
+ if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
+ return false;
+
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
+ // Fall through.
+
+ case ISD::SIGN_EXTEND: {
// Check that the extension bits are don't-care (i.e. are masked out
// by the final mask).
unsigned InnerBitSize = N.getOperand(0).getValueType().getSizeInBits();
@@ -778,8 +789,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
}
case ISD::SHL: {
- ConstantSDNode *CountNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
@@ -806,8 +816,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
case ISD::SRL:
case ISD::SRA: {
- ConstantSDNode *CountNode =
- dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
+ auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
if (!CountNode)
return false;
@@ -860,12 +869,12 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
Count += 1;
if (Count == 0)
- return 0;
+ return nullptr;
if (Count == 1) {
// Prefer to use normal shift instructions over RISBG, since they can handle
// all cases and are sometimes shorter.
if (N->getOpcode() != ISD::AND)
- return 0;
+ return nullptr;
// Prefer register extensions like LLC over RISBG. Also prefer to start
// out with normal ANDs if one instruction would be enough. We can convert
@@ -876,13 +885,13 @@ SDNode *SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
SystemZ::isImmLF(~RISBG.Mask) ||
SystemZ::isImmHF(~RISBG.Mask)) {
// Force the new mask into the DAG, since it may include known-one bits.
- ConstantSDNode *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
+ auto *MaskN = cast<ConstantSDNode>(N->getOperand(1).getNode());
if (MaskN->getZExtValue() != RISBG.Mask) {
SDValue NewMask = CurDAG->getConstant(RISBG.Mask, VT);
N = CurDAG->UpdateNodeOperands(N, N->getOperand(0), NewMask);
return SelectCode(N);
}
- return 0;
+ return nullptr;
}
}
@@ -920,7 +929,7 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
// Do nothing if neither operand is suitable.
if (Count[0] == 0 && Count[1] == 0)
- return 0;
+ return nullptr;
// Pick the deepest second operand.
unsigned I = Count[0] > Count[1] ? 0 : 1;
@@ -928,9 +937,9 @@ SDNode *SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
// Prefer IC for character insertions from memory.
if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
- if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
+ if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
if (Load->getMemoryVT() == MVT::i8)
- return 0;
+ return nullptr;
// See whether we can avoid an AND in the first operand by converting
// ROSBG to RISBG.
@@ -979,8 +988,8 @@ bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
return true;
// Otherwise we need to check whether there's an alias.
- const Value *V1 = Load->getSrcValue();
- const Value *V2 = Store->getSrcValue();
+ const Value *V1 = Load->getMemOperand()->getValue();
+ const Value *V2 = Store->getMemOperand()->getValue();
if (!V1 || !V2)
return false;
@@ -996,8 +1005,8 @@ bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
}
bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
- StoreSDNode *Store = cast<StoreSDNode>(N);
- LoadSDNode *Load = cast<LoadSDNode>(Store->getValue());
+ auto *Store = cast<StoreSDNode>(N);
+ auto *Load = cast<LoadSDNode>(Store->getValue());
// Prefer not to use MVC if either address can use ... RELATIVE LONG
// instructions.
@@ -1016,9 +1025,9 @@ bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
unsigned I) const {
- StoreSDNode *StoreA = cast<StoreSDNode>(N);
- LoadSDNode *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
- LoadSDNode *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
+ auto *StoreA = cast<StoreSDNode>(N);
+ auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
+ auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}
@@ -1030,11 +1039,11 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
if (Node->isMachineOpcode()) {
DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
Node->setNodeId(-1);
- return 0;
+ return nullptr;
}
unsigned Opcode = Node->getOpcode();
- SDNode *ResNode = 0;
+ SDNode *ResNode = nullptr;
switch (Opcode) {
case ISD::OR:
if (Node->getOperand(1).getOpcode() != ISD::Constant)
@@ -1049,7 +1058,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
// If this is a 64-bit operation in which both 32-bit halves are nonzero,
// split the operation into two.
if (!ResNode && Node->getValueType(0) == MVT::i64)
- if (ConstantSDNode *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
+ if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
uint64_t Val = Op1->getZExtValue();
if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val))
Node = splitLargeImmediate(Opcode, Node, Node->getOperand(0),
@@ -1064,6 +1073,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
case ISD::ROTL:
case ISD::SHL:
case ISD::SRL:
+ case ISD::ZERO_EXTEND:
if (!ResNode)
ResNode = tryRISBGZero(Node);
break;
@@ -1079,20 +1089,6 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
}
break;
- case ISD::ATOMIC_LOAD_SUB:
- // Try to convert subtractions of constants to additions.
- if (ConstantSDNode *Op2 = dyn_cast<ConstantSDNode>(Node->getOperand(2))) {
- uint64_t Value = -Op2->getZExtValue();
- EVT VT = Node->getValueType(0);
- if (VT == MVT::i32 || isInt<32>(Value)) {
- SDValue Ops[] = { Node->getOperand(0), Node->getOperand(1),
- CurDAG->getConstant(int32_t(Value), VT) };
- Node = CurDAG->MorphNodeTo(Node, ISD::ATOMIC_LOAD_ADD,
- Node->getVTList(), Ops, array_lengthof(Ops));
- }
- }
- break;
-
case SystemZISD::SELECT_CCMASK: {
SDValue Op0 = Node->getOperand(0);
SDValue Op1 = Node->getOperand(1);
@@ -1120,7 +1116,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDNode *Node) {
ResNode = SelectCode(Node);
DEBUG(errs() << "=> ";
- if (ResNode == NULL || ResNode == Node)
+ if (ResNode == nullptr || ResNode == Node)
Node->dump(CurDAG);
else
ResNode->dump(CurDAG);
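
The deleted ATOMIC_LOAD_SUB case relied on the fact that subtracting a constant is the same as adding its two's-complement negation, so an atomic fetch-and-sub of a constant can be selected as a fetch-and-add of -c whenever -c still fits the add form; per the lowering changes in the next file, that rewrite now happens during custom lowering rather than in the instruction selector. A standalone check of the identity on 32-bit values:

    // Standalone check: x - c == x + (-c) modulo 2^32.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t Values[] = {0u, 1u, 42u, 0x7fffffffu, 0x80000000u, 0xffffffffu};
      for (uint32_t X : Values)
        for (uint32_t C : Values)
          assert(static_cast<uint32_t>(X - C) ==
                 static_cast<uint32_t>(X + (0u - C)));
      return 0;
    }
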
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index f6e18530f4a5..00c65f5bba6b 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -11,8 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-lower"
-
#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
@@ -22,11 +20,12 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
-
#include <cctype>
using namespace llvm;
+#define DEBUG_TYPE "systemz-lower"
+
namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
@@ -38,7 +37,28 @@ struct IPMConversion {
int64_t AddValue;
unsigned Bit;
};
-}
+
+// Represents information about a comparison.
+struct Comparison {
+ Comparison(SDValue Op0In, SDValue Op1In)
+ : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
+
+ // The operands to the comparison.
+ SDValue Op0, Op1;
+
+ // The opcode that should be used to compare Op0 and Op1.
+ unsigned Opcode;
+
+ // A SystemZICMP value. Only used for integer comparisons.
+ unsigned ICmpType;
+
+ // The mask of CC values that Opcode can produce.
+ unsigned CCValid;
+
+ // The mask of CC values for which the original condition is true.
+ unsigned CCMask;
+};
+} // end anonymous namespace
// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {
@@ -60,9 +80,9 @@ static MachineOperand earlyUseOperand(MachineOperand Op) {
return Op;
}
-SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
- : TargetLowering(tm, new TargetLoweringObjectFileELF()),
- Subtarget(*tm.getSubtargetImpl()), TM(tm) {
+SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &tm)
+ : TargetLowering(tm, new TargetLoweringObjectFileELF()),
+ Subtarget(tm.getSubtarget<SystemZSubtarget>()) {
MVT PtrVT = getPointerTy();
// Set up the register classes.
@@ -134,10 +154,14 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::SDIVREM, VT, Custom);
setOperationAction(ISD::UDIVREM, VT, Custom);
- // Expand ATOMIC_LOAD and ATOMIC_STORE using ATOMIC_CMP_SWAP.
- // FIXME: probably much too conservative.
- setOperationAction(ISD::ATOMIC_LOAD, VT, Expand);
- setOperationAction(ISD::ATOMIC_STORE, VT, Expand);
+ // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
+ // stores, putting a serialization instruction after the stores.
+ setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
+
+ // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
+ // available, or if the operand is constant.
+ setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
// No special instructions for these.
setOperationAction(ISD::CTPOP, VT, Expand);
@@ -152,8 +176,9 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::SMUL_LOHI, VT, Custom);
setOperationAction(ISD::UMUL_LOHI, VT, Custom);
- // We have instructions for signed but not unsigned FP conversion.
- setOperationAction(ISD::FP_TO_UINT, VT, Expand);
+ // Only z196 and above have native support for conversions to unsigned.
+ if (!Subtarget.hasFPExtension())
+ setOperationAction(ISD::FP_TO_UINT, VT, Expand);
}
}
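
Naturally aligned loads and stores of up to eight bytes are already atomic on z/Architecture, so ATOMIC_LOAD and ATOMIC_STORE no longer need the conservative compare-and-swap expansion: they can become ordinary volatile accesses, with a serialization (the Serialize pseudo shown in the AsmPrinter change above) ordering sequentially consistent stores. A rough sketch of the shape such a custom lowering can take; the helper name and the omitted ordering checks are assumptions, since the body is not part of this excerpt:

    // Sketch, not the literal implementation: lower an atomic store as a plain
    // volatile store followed by a serialization node.
    SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
                                                     SelectionDAG &DAG) const {
      auto *Node = cast<AtomicSDNode>(Op.getNode());
      SDLoc DL(Node);
      SDValue Chain = DAG.getStore(Node->getChain(), DL, Node->getVal(),
                                   Node->getBasePtr(), Node->getMemOperand());
      // Make the store visible before any later accesses.
      return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
    }
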
@@ -173,10 +198,12 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
- // We have instructions for signed but not unsigned FP conversion.
+ // z10 has instructions for signed but not unsigned FP conversion.
// Handle unsigned 32-bit types as signed 64-bit types.
- setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
- setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ if (!Subtarget.hasFPExtension()) {
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
+ }
// We have native support for a 64-bit CTLZ, via FLOGR.
setOperationAction(ISD::CTLZ, MVT::i32, Promote);
@@ -266,6 +293,9 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm)
setOperationAction(ISD::VACOPY, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
+ // Codes for which we want to perform some z-specific combinations.
+ setTargetDAGCombine(ISD::SIGN_EXTEND);
+
// We want to use MVC in preference to even a single load/store pair.
MaxStoresPerMemcpy = 0;
MaxStoresPerMemcpyOptSize = 0;
@@ -310,6 +340,7 @@ bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
}
bool SystemZTargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
+ unsigned,
bool *Fast) const {
// Unaligned accesses should never be slower than the expanded version.
// We check specifically for aligned accesses in the few cases where
@@ -393,7 +424,7 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
- if (CallOperandVal == NULL)
+ if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
@@ -416,31 +447,31 @@ getSingleConstraintMatchWeight(AsmOperandInfo &info,
break;
case 'I': // Unsigned 8-bit constant
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<8>(C->getZExtValue()))
weight = CW_Constant;
break;
case 'J': // Unsigned 12-bit constant
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isUInt<12>(C->getZExtValue()))
weight = CW_Constant;
break;
case 'K': // Signed 16-bit constant
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<16>(C->getSExtValue()))
weight = CW_Constant;
break;
case 'L': // Signed 20-bit displacement (on all targets we support)
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (isInt<20>(C->getSExtValue()))
weight = CW_Constant;
break;
case 'M': // 0x7fffffff
- if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
+ if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
if (C->getZExtValue() == 0x7fffffff)
weight = CW_Constant;
break;
@@ -461,7 +492,7 @@ parseRegisterNumber(const std::string &Constraint,
if (Index < 16 && Map[Index])
return std::make_pair(Map[Index], RC);
}
- return std::make_pair(0u, static_cast<TargetRegisterClass*>(0));
+ return std::make_pair(0U, nullptr);
}
std::pair<unsigned, const TargetRegisterClass *> SystemZTargetLowering::
@@ -533,35 +564,35 @@ LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
if (Constraint.length() == 1) {
switch (Constraint[0]) {
case 'I': // Unsigned 8-bit constant
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<8>(C->getZExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
Op.getValueType()));
return;
case 'J': // Unsigned 12-bit constant
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isUInt<12>(C->getZExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
Op.getValueType()));
return;
case 'K': // Signed 16-bit constant
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<16>(C->getSExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
Op.getValueType()));
return;
case 'L': // Signed 20-bit displacement (on all targets we support)
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (isInt<20>(C->getSExtValue()))
Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
Op.getValueType()));
return;
case 'M': // 0x7fffffff
- if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+ if (auto *C = dyn_cast<ConstantSDNode>(Op))
if (C->getZExtValue() == 0x7fffffff)
Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
Op.getValueType()));
@@ -642,12 +673,13 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
MachineRegisterInfo &MRI = MF.getRegInfo();
SystemZMachineFunctionInfo *FuncInfo =
MF.getInfo<SystemZMachineFunctionInfo>();
- const SystemZFrameLowering *TFL =
- static_cast<const SystemZFrameLowering *>(TM.getFrameLowering());
+ auto *TFL = static_cast<const SystemZFrameLowering *>(
+ DAG.getTarget().getFrameLowering());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState CCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCState CCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
+ *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
unsigned NumFixedGPRs = 0;
@@ -742,8 +774,8 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
}
// Join the stores, which are independent of one another.
Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOps[NumFixedFPRs],
- SystemZ::NumArgFPRs - NumFixedFPRs);
+ makeArrayRef(&MemOps[NumFixedFPRs],
+ SystemZ::NumArgFPRs-NumFixedFPRs));
}
}
@@ -785,7 +817,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Analyze the operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
- CCState ArgCCInfo(CallConv, IsVarArg, MF, TM, ArgLocs, *DAG.getContext());
+ CCState ArgCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
+ *DAG.getContext());
ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
// We don't support GuaranteedTailCallOpt, only automatically-detected
@@ -845,17 +878,16 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Join the stores, which are independent of one another.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- &MemOpChains[0], MemOpChains.size());
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
// Accept direct calls by converting symbolic call addresses to the
// associated Target* opcodes. Force %r1 to be used for indirect
// tail calls.
SDValue Glue;
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
- } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
} else if (IsTailCall) {
@@ -882,6 +914,12 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
Ops.push_back(DAG.getRegister(RegsToPass[I].first,
RegsToPass[I].second.getValueType()));
+ // Add a register mask operand representing the call-preserved registers.
+ const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
+ const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
// Glue the call to the argument copies, if any.
if (Glue.getNode())
Ops.push_back(Glue);
@@ -889,8 +927,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Emit the call.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
if (IsTailCall)
- return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, &Ops[0], Ops.size());
- Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, &Ops[0], Ops.size());
+ return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
+ Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
Glue = Chain.getValue(1);
// Mark the end of the call, which is glued to the call itself.
@@ -902,7 +940,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
+ *DAG.getContext());
RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
// Copy all of the result registers out of their specified physreg.
@@ -933,7 +972,8 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
// Assign locations to each returned value.
SmallVector<CCValAssign, 16> RetLocs;
- CCState RetCCInfo(CallConv, IsVarArg, MF, TM, RetLocs, *DAG.getContext());
+ CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
+ *DAG.getContext());
RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
// Quick exit for void returns
@@ -966,8 +1006,12 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
if (Glue.getNode())
RetOps.push_back(Glue);
- return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other,
- RetOps.data(), RetOps.size());
+ return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
+}
+
+SDValue SystemZTargetLowering::
+prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const {
+ return DAG.getNode(SystemZISD::SERIALIZE, DL, MVT::Other, Chain);
}
// CC is a comparison that will be implemented using an integer or
@@ -1044,7 +1088,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
- // The remaing cases are 1, 2, 0/1/3 and 0/2/3. All these are
+ // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these
// can be done by inverting the low CC bit and applying one of the
// sign-based extractions above.
if (CCMask == (CCValid & SystemZ::CCMASK_1))
@@ -1065,109 +1109,100 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
llvm_unreachable("Unexpected CC combination");
}
-// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
-// can be converted to a comparison against zero, adjust the operands
+// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
-static void adjustZeroCmp(SelectionDAG &DAG, bool &IsUnsigned,
- SDValue &CmpOp0, SDValue &CmpOp1,
- unsigned &CCMask) {
- if (IsUnsigned)
+static void adjustZeroCmp(SelectionDAG &DAG, Comparison &C) {
+ if (C.ICmpType == SystemZICMP::UnsignedOnly)
return;
- ConstantSDNode *ConstOp1 = dyn_cast<ConstantSDNode>(CmpOp1.getNode());
+ auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
if (!ConstOp1)
return;
int64_t Value = ConstOp1->getSExtValue();
- if ((Value == -1 && CCMask == SystemZ::CCMASK_CMP_GT) ||
- (Value == -1 && CCMask == SystemZ::CCMASK_CMP_LE) ||
- (Value == 1 && CCMask == SystemZ::CCMASK_CMP_LT) ||
- (Value == 1 && CCMask == SystemZ::CCMASK_CMP_GE)) {
- CCMask ^= SystemZ::CCMASK_CMP_EQ;
- CmpOp1 = DAG.getConstant(0, CmpOp1.getValueType());
+ if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
+ (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
+ (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
+ (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
+ C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
+ C.Op1 = DAG.getConstant(0, C.Op1.getValueType());
}
}
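
The rewrite works because there is no integer strictly between -1 and 0 or between 0 and 1, so for signed operands x > -1 is x >= 0, x <= -1 is x < 0, and so on; flipping CCMASK_CMP_EQ in the mask is exactly that adjustment. A standalone verification of the four cases handled above:

    // Standalone check of the zero-comparison rewrites.
    #include <cassert>

    int main() {
      for (long long X = -1000; X <= 1000; ++X) {
        assert((X > -1)  == (X >= 0));   // GT -1  ->  GE 0
        assert((X <= -1) == (X <  0));   // LE -1  ->  LT 0
        assert((X <  1)  == (X <= 0));   // LT  1  ->  LE 0
        assert((X >= 1)  == (X >  0));   // GE  1  ->  GT 0
      }
      return 0;
    }
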
-// If a comparison described by IsUnsigned, CCMask, CmpOp0 and CmpOp1
-// is suitable for CLI(Y), CHHSI or CLHHSI, adjust the operands as necessary.
-static void adjustSubwordCmp(SelectionDAG &DAG, bool &IsUnsigned,
- SDValue &CmpOp0, SDValue &CmpOp1,
- unsigned &CCMask) {
+// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
+// adjust the operands as necessary.
+static void adjustSubwordCmp(SelectionDAG &DAG, Comparison &C) {
// For us to make any changes, it must be a comparison between a single-use
// load and a constant.
- if (!CmpOp0.hasOneUse() ||
- CmpOp0.getOpcode() != ISD::LOAD ||
- CmpOp1.getOpcode() != ISD::Constant)
+ if (!C.Op0.hasOneUse() ||
+ C.Op0.getOpcode() != ISD::LOAD ||
+ C.Op1.getOpcode() != ISD::Constant)
return;
// We must have an 8- or 16-bit load.
- LoadSDNode *Load = cast<LoadSDNode>(CmpOp0);
+ auto *Load = cast<LoadSDNode>(C.Op0);
unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
if (NumBits != 8 && NumBits != 16)
return;
// The load must be an extending one and the constant must be within the
// range of the unextended value.
- ConstantSDNode *Constant = cast<ConstantSDNode>(CmpOp1);
- uint64_t Value = Constant->getZExtValue();
+ auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
+ uint64_t Value = ConstOp1->getZExtValue();
uint64_t Mask = (1 << NumBits) - 1;
if (Load->getExtensionType() == ISD::SEXTLOAD) {
- int64_t SignedValue = Constant->getSExtValue();
- if (uint64_t(SignedValue) + (1ULL << (NumBits - 1)) > Mask)
+ // Make sure that ConstOp1 is in range of C.Op0.
+ int64_t SignedValue = ConstOp1->getSExtValue();
+ if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
return;
- // Unsigned comparison between two sign-extended values is equivalent
- // to unsigned comparison between two zero-extended values.
- if (IsUnsigned)
+ if (C.ICmpType != SystemZICMP::SignedOnly) {
+ // Unsigned comparison between two sign-extended values is equivalent
+ // to unsigned comparison between two zero-extended values.
Value &= Mask;
- else if (CCMask == SystemZ::CCMASK_CMP_EQ ||
- CCMask == SystemZ::CCMASK_CMP_NE)
- // Any choice of IsUnsigned is OK for equality comparisons.
- // We could use either CHHSI or CLHHSI for 16-bit comparisons,
- // but since we use CLHHSI for zero extensions, it seems better
- // to be consistent and do the same here.
- Value &= Mask, IsUnsigned = true;
- else if (NumBits == 8) {
+ } else if (NumBits == 8) {
// Try to treat the comparison as unsigned, so that we can use CLI.
// Adjust CCMask and Value as necessary.
- if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_LT)
+ if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
// Test whether the high bit of the byte is set.
- Value = 127, CCMask = SystemZ::CCMASK_CMP_GT, IsUnsigned = true;
- else if (Value == 0 && CCMask == SystemZ::CCMASK_CMP_GE)
+ Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
+ else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
// Test whether the high bit of the byte is clear.
- Value = 128, CCMask = SystemZ::CCMASK_CMP_LT, IsUnsigned = true;
+ Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
else
// No instruction exists for this combination.
return;
+ C.ICmpType = SystemZICMP::UnsignedOnly;
}
} else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
if (Value > Mask)
return;
- // Signed comparison between two zero-extended values is equivalent
- // to unsigned comparison.
- IsUnsigned = true;
+ assert(C.ICmpType == SystemZICMP::Any &&
+ "Signedness shouldn't matter here.");
} else
return;
// Make sure that the first operand is an i32 of the right extension type.
- ISD::LoadExtType ExtType = IsUnsigned ? ISD::ZEXTLOAD : ISD::SEXTLOAD;
- if (CmpOp0.getValueType() != MVT::i32 ||
+ ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
+ ISD::SEXTLOAD :
+ ISD::ZEXTLOAD);
+ if (C.Op0.getValueType() != MVT::i32 ||
Load->getExtensionType() != ExtType)
- CmpOp0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
- Load->getChain(), Load->getBasePtr(),
- Load->getPointerInfo(), Load->getMemoryVT(),
- Load->isVolatile(), Load->isNonTemporal(),
- Load->getAlignment());
+ C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32,
+ Load->getChain(), Load->getBasePtr(),
+ Load->getPointerInfo(), Load->getMemoryVT(),
+ Load->isVolatile(), Load->isNonTemporal(),
+ Load->getAlignment());
// Make sure that the second operand is an i32 with the right value.
- if (CmpOp1.getValueType() != MVT::i32 ||
- Value != Constant->getZExtValue())
- CmpOp1 = DAG.getConstant(Value, MVT::i32);
+ if (C.Op1.getValueType() != MVT::i32 ||
+ Value != ConstOp1->getZExtValue())
+ C.Op1 = DAG.getConstant(Value, MVT::i32);
}
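
The reason these subword comparisons can be treated as unsigned is that, for two values of the same N-bit width, an unsigned comparison of their sign-extended forms orders them exactly like an unsigned comparison of their zero-extended forms, which is what lets an 8-bit case fall back to CLI (COMPARE LOGICAL IMMEDIATE) with the masked constant. A standalone check of that claim for N = 8:

    // Standalone check: unsigned compare of sign-extended bytes matches
    // unsigned compare of zero-extended bytes.
    #include <cassert>
    #include <cstdint>

    int main() {
      for (int A = 0; A < 256; ++A)
        for (int B = 0; B < 256; ++B) {
          uint64_t SA = (uint64_t)(int64_t)(int8_t)A;   // sign-extended
          uint64_t SB = (uint64_t)(int64_t)(int8_t)B;
          uint64_t ZA = (uint64_t)(uint8_t)A;           // zero-extended
          uint64_t ZB = (uint64_t)(uint8_t)B;
          assert((SA < SB) == (ZA < ZB));
        }
      return 0;
    }
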
// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
- LoadSDNode *Load = dyn_cast<LoadSDNode>(Op.getNode());
+ auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
if (Load) {
// There are no instructions to compare a register with a memory byte.
if (Load->getMemoryVT() == MVT::i8)
@@ -1187,53 +1222,163 @@ static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
return false;
}
-// Return true if it is better to swap comparison operands Op0 and Op1.
-// ICmpType is the type of an integer comparison.
-static bool shouldSwapCmpOperands(SDValue Op0, SDValue Op1,
- unsigned ICmpType) {
+// Return true if it is better to swap the operands of C.
+static bool shouldSwapCmpOperands(const Comparison &C) {
// Leave f128 comparisons alone, since they have no memory forms.
- if (Op0.getValueType() == MVT::f128)
+ if (C.Op0.getValueType() == MVT::f128)
return false;
// Always keep a floating-point constant second, since comparisons with
// zero can use LOAD TEST and comparisons with other constants make a
// natural memory operand.
- if (isa<ConstantFPSDNode>(Op1))
+ if (isa<ConstantFPSDNode>(C.Op1))
return false;
// Never swap comparisons with zero since there are many ways to optimize
// those later.
- ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
- if (COp1 && COp1->getZExtValue() == 0)
+ auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
+ if (ConstOp1 && ConstOp1->getZExtValue() == 0)
+ return false;
+
+ // Also keep natural memory operands second if the loaded value is
+ // only used here. Several comparisons have memory forms.
+ if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
return false;
// Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
// In that case we generally prefer the memory to be second.
- if ((isNaturalMemoryOperand(Op0, ICmpType) && Op0.hasOneUse()) &&
- !(isNaturalMemoryOperand(Op1, ICmpType) && Op1.hasOneUse())) {
+ if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
// The only exceptions are when the second operand is a constant and
// we can use things like CHHSI.
- if (!COp1)
+ if (!ConstOp1)
return true;
// The unsigned memory-immediate instructions can handle 16-bit
// unsigned integers.
- if (ICmpType != SystemZICMP::SignedOnly &&
- isUInt<16>(COp1->getZExtValue()))
+ if (C.ICmpType != SystemZICMP::SignedOnly &&
+ isUInt<16>(ConstOp1->getZExtValue()))
return false;
// The signed memory-immediate instructions can handle 16-bit
// signed integers.
- if (ICmpType != SystemZICMP::UnsignedOnly &&
- isInt<16>(COp1->getSExtValue()))
+ if (C.ICmpType != SystemZICMP::UnsignedOnly &&
+ isInt<16>(ConstOp1->getSExtValue()))
return false;
return true;
}
+
+ // Try to promote the use of CGFR and CLGFR.
+ unsigned Opcode0 = C.Op0.getOpcode();
+ if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
+ return true;
+ if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
+ return true;
+ if (C.ICmpType != SystemZICMP::SignedOnly &&
+ Opcode0 == ISD::AND &&
+ C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
+ return true;
+
return false;
}
+// Return a version of comparison CC mask CCMask in which the LT and GT
+// actions are swapped.
+static unsigned reverseCCMask(unsigned CCMask) {
+ return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
+ (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
+ (CCMask & SystemZ::CCMASK_CMP_UO));
+}
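// A minimal standalone sketch (illustration only, not part of this patch) of
// the LT/GT swap that reverseCCMask performs.  The CCMASK_* values below are
// hypothetical stand-ins rather than the definitions from SystemZ.h.
#include <cassert>

namespace {
constexpr unsigned CCMASK_CMP_EQ = 1u << 3;
constexpr unsigned CCMASK_CMP_LT = 1u << 2;
constexpr unsigned CCMASK_CMP_GT = 1u << 1;
constexpr unsigned CCMASK_CMP_UO = 1u << 0;

// Keep the EQ and UO bits, exchange the LT and GT bits.
unsigned reverseCCMaskSketch(unsigned CCMask) {
  return (CCMask & CCMASK_CMP_EQ) |
         ((CCMask & CCMASK_CMP_GT) ? CCMASK_CMP_LT : 0) |
         ((CCMask & CCMASK_CMP_LT) ? CCMASK_CMP_GT : 0) |
         (CCMask & CCMASK_CMP_UO);
}
} // end anonymous namespace

int main() {
  // "less than or equal" becomes "greater than or equal" once the comparison
  // operands have been swapped.
  assert(reverseCCMaskSketch(CCMASK_CMP_LT | CCMASK_CMP_EQ) ==
         (CCMASK_CMP_GT | CCMASK_CMP_EQ));
  return 0;
}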
+
+// Check whether C tests for equality between X and Y and whether X - Y
+// or Y - X is also computed. In that case it's better to compare the
+// result of the subtraction against zero.
+static void adjustForSubtraction(SelectionDAG &DAG, Comparison &C) {
+ if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
+ C.CCMask == SystemZ::CCMASK_CMP_NE) {
+ for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
+ SDNode *N = *I;
+ if (N->getOpcode() == ISD::SUB &&
+ ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
+ (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
+ C.Op0 = SDValue(N, 0);
+ C.Op1 = DAG.getConstant(0, N->getValueType(0));
+ return;
+ }
+ }
+ }
+}
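// The rewrite above is safe because an EQ/NE test of X and Y gives the same
// answer as testing X - Y (or Y - X) against zero, even when the subtraction
// wraps.  A trivial standalone check, for illustration only:
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Samples[] = {0, 1, 0x7fffffffffffffffULL, 0xffffffffffffffffULL};
  for (uint64_t X : Samples)
    for (uint64_t Y : Samples) {
      bool Direct = (X == Y);
      bool ViaSub = ((X - Y) == 0);  // modular subtraction preserves EQ/NE
      assert(Direct == ViaSub);
    }
  return 0;
}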
+
+// Check whether C compares a floating-point value with zero and whether that
+// floating-point value is also negated. In this case we can use the
+// negation to set CC, avoiding separate LOAD AND TEST and
+// LOAD (NEGATIVE/COMPLEMENT) instructions.
+static void adjustForFNeg(Comparison &C) {
+ auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
+ if (C1 && C1->isZero()) {
+ for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
+ SDNode *N = *I;
+ if (N->getOpcode() == ISD::FNEG) {
+ C.Op0 = SDValue(N, 0);
+ C.CCMask = reverseCCMask(C.CCMask);
+ return;
+ }
+ }
+ }
+}
+
+// Check whether C compares (shl X, 32) with 0 and whether X is
+// also sign-extended. In that case it is better to test the result
+// of the sign extension using LTGFR.
+//
+// This case is important because InstCombine transforms a comparison
+// with (sext (trunc X)) into a comparison with (shl X, 32).
+static void adjustForLTGFR(Comparison &C) {
+ // Check for a comparison between (shl X, 32) and 0.
+ if (C.Op0.getOpcode() == ISD::SHL &&
+ C.Op0.getValueType() == MVT::i64 &&
+ C.Op1.getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
+ if (C1 && C1->getZExtValue() == 32) {
+ SDValue ShlOp0 = C.Op0.getOperand(0);
+ // See whether X has any SIGN_EXTEND_INREG uses.
+ for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
+ SDNode *N = *I;
+ if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
+ C.Op0 = SDValue(N, 0);
+ return;
+ }
+ }
+ }
+ }
+}
+
+// If C compares the truncation of an extending load, try to compare
+// the untruncated value instead. This exposes more opportunities to
+// reuse CC.
+static void adjustICmpTruncate(SelectionDAG &DAG, Comparison &C) {
+ if (C.Op0.getOpcode() == ISD::TRUNCATE &&
+ C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
+ C.Op1.getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
+ if (L->getMemoryVT().getStoreSizeInBits()
+ <= C.Op0.getValueType().getSizeInBits()) {
+ unsigned Type = L->getExtensionType();
+ if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
+ (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
+ C.Op0 = C.Op0.getOperand(0);
+ C.Op1 = DAG.getConstant(0, C.Op0.getValueType());
+ }
+ }
+ }
+}
+
// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
- ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
if (!Shift)
return false;
@@ -1341,118 +1486,143 @@ static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
return 0;
}
-// See whether the comparison (Opcode CmpOp0, CmpOp1, ICmpType) can be
-// implemented as a TEST UNDER MASK instruction when the condition being
-// tested is as described by CCValid and CCMask. Update the arguments
-// with the TM version if so.
-static void adjustForTestUnderMask(SelectionDAG &DAG, unsigned &Opcode,
- SDValue &CmpOp0, SDValue &CmpOp1,
- unsigned &CCValid, unsigned &CCMask,
- unsigned &ICmpType) {
+// See whether C can be implemented as a TEST UNDER MASK instruction.
+// Update the arguments with the TM version if so.
+static void adjustForTestUnderMask(SelectionDAG &DAG, Comparison &C) {
// Check that we have a comparison with a constant.
- ConstantSDNode *ConstCmpOp1 = dyn_cast<ConstantSDNode>(CmpOp1);
- if (!ConstCmpOp1)
+ auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
+ if (!ConstOp1)
return;
- uint64_t CmpVal = ConstCmpOp1->getZExtValue();
+ uint64_t CmpVal = ConstOp1->getZExtValue();
// Check whether the nonconstant input is an AND with a constant mask.
- if (CmpOp0.getOpcode() != ISD::AND)
- return;
- SDValue AndOp0 = CmpOp0.getOperand(0);
- SDValue AndOp1 = CmpOp0.getOperand(1);
- ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(AndOp1.getNode());
- if (!Mask)
- return;
- uint64_t MaskVal = Mask->getZExtValue();
+ Comparison NewC(C);
+ uint64_t MaskVal;
+ ConstantSDNode *Mask = nullptr;
+ if (C.Op0.getOpcode() == ISD::AND) {
+ NewC.Op0 = C.Op0.getOperand(0);
+ NewC.Op1 = C.Op0.getOperand(1);
+ Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
+ if (!Mask)
+ return;
+ MaskVal = Mask->getZExtValue();
+ } else {
+ // There is no instruction to compare with a 64-bit immediate
+ // so use TMHH instead if possible. We need an unsigned ordered
+ // comparison with an i64 immediate.
+ if (NewC.Op0.getValueType() != MVT::i64 ||
+ NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
+ NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
+ NewC.ICmpType == SystemZICMP::SignedOnly)
+ return;
+ // Convert LE and GT comparisons into LT and GE.
+ if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
+ NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
+ if (CmpVal == uint64_t(-1))
+ return;
+ CmpVal += 1;
+ NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
+ }
+    // If the low N bits of Op1 are zero then the low N bits of Op0 can
+ // be masked off without changing the result.
+ MaskVal = -(CmpVal & -CmpVal);
+ NewC.ICmpType = SystemZICMP::UnsignedOnly;
+ }
// Check whether the combination of mask, comparison value and comparison
// type is suitable.
- unsigned BitSize = CmpOp0.getValueType().getSizeInBits();
+ unsigned BitSize = NewC.Op0.getValueType().getSizeInBits();
unsigned NewCCMask, ShiftVal;
- if (ICmpType != SystemZICMP::SignedOnly &&
- AndOp0.getOpcode() == ISD::SHL &&
- isSimpleShift(AndOp0, ShiftVal) &&
- (NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal >> ShiftVal,
+ if (NewC.ICmpType != SystemZICMP::SignedOnly &&
+ NewC.Op0.getOpcode() == ISD::SHL &&
+ isSimpleShift(NewC.Op0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
+ MaskVal >> ShiftVal,
CmpVal >> ShiftVal,
SystemZICMP::Any))) {
- AndOp0 = AndOp0.getOperand(0);
- AndOp1 = DAG.getConstant(MaskVal >> ShiftVal, AndOp0.getValueType());
- } else if (ICmpType != SystemZICMP::SignedOnly &&
- AndOp0.getOpcode() == ISD::SRL &&
- isSimpleShift(AndOp0, ShiftVal) &&
- (NewCCMask = getTestUnderMaskCond(BitSize, CCMask,
+ NewC.Op0 = NewC.Op0.getOperand(0);
+ MaskVal >>= ShiftVal;
+ } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
+ NewC.Op0.getOpcode() == ISD::SRL &&
+ isSimpleShift(NewC.Op0, ShiftVal) &&
+ (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
MaskVal << ShiftVal,
CmpVal << ShiftVal,
SystemZICMP::UnsignedOnly))) {
- AndOp0 = AndOp0.getOperand(0);
- AndOp1 = DAG.getConstant(MaskVal << ShiftVal, AndOp0.getValueType());
+ NewC.Op0 = NewC.Op0.getOperand(0);
+ MaskVal <<= ShiftVal;
} else {
- NewCCMask = getTestUnderMaskCond(BitSize, CCMask, MaskVal, CmpVal,
- ICmpType);
+ NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
+ NewC.ICmpType);
if (!NewCCMask)
return;
}
// Go ahead and make the change.
- Opcode = SystemZISD::TM;
- CmpOp0 = AndOp0;
- CmpOp1 = AndOp1;
- ICmpType = (bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
- bool(NewCCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
- CCValid = SystemZ::CCMASK_TM;
- CCMask = NewCCMask;
-}
-
-// Return a target node that compares CmpOp0 with CmpOp1 and stores a
-// 2-bit result in CC. Set CCValid to the CCMASK_* of all possible
-// 2-bit results and CCMask to the subset of those results that are
-// associated with Cond.
-static SDValue emitCmp(const SystemZTargetMachine &TM, SelectionDAG &DAG,
- SDLoc DL, SDValue CmpOp0, SDValue CmpOp1,
- ISD::CondCode Cond, unsigned &CCValid,
- unsigned &CCMask) {
- bool IsUnsigned = false;
- CCMask = CCMaskForCondCode(Cond);
- unsigned Opcode, ICmpType = 0;
- if (CmpOp0.getValueType().isFloatingPoint()) {
- CCValid = SystemZ::CCMASK_FCMP;
- Opcode = SystemZISD::FCMP;
+ C.Opcode = SystemZISD::TM;
+ C.Op0 = NewC.Op0;
+ if (Mask && Mask->getZExtValue() == MaskVal)
+ C.Op1 = SDValue(Mask, 0);
+ else
+ C.Op1 = DAG.getConstant(MaskVal, C.Op0.getValueType());
+ C.CCValid = SystemZ::CCMASK_TM;
+ C.CCMask = NewCCMask;
+}
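// The TMHH path above relies on -(CmpVal & -CmpVal) producing a mask that
// keeps the lowest set bit of CmpVal and every bit above it, so that the bits
// below it can be dropped without changing an unsigned ordered comparison.
// A standalone sketch of that identity, for illustration only:
#include <cassert>
#include <cstdint>

int main() {
  uint64_t CmpVal = 0x0000000000100000ULL;         // low 20 bits are zero
  uint64_t MaskVal = 0 - (CmpVal & (0 - CmpVal));  // i.e. -(CmpVal & -CmpVal)
  assert(MaskVal == 0xFFFFFFFFFFF00000ULL);

  uint64_t Ops[] = {0x000FFFFFULL, 0x00100000ULL, 0x00100001ULL,
                    0x0000000012345678ULL};
  for (uint64_t Op0 : Ops)
    assert((Op0 >= CmpVal) == ((Op0 & MaskVal) >= CmpVal));
  return 0;
}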
+
+// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
+static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
+ ISD::CondCode Cond) {
+ Comparison C(CmpOp0, CmpOp1);
+ C.CCMask = CCMaskForCondCode(Cond);
+ if (C.Op0.getValueType().isFloatingPoint()) {
+ C.CCValid = SystemZ::CCMASK_FCMP;
+ C.Opcode = SystemZISD::FCMP;
+ adjustForFNeg(C);
} else {
- IsUnsigned = CCMask & SystemZ::CCMASK_CMP_UO;
- CCValid = SystemZ::CCMASK_ICMP;
- CCMask &= CCValid;
- adjustZeroCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
- adjustSubwordCmp(DAG, IsUnsigned, CmpOp0, CmpOp1, CCMask);
- Opcode = SystemZISD::ICMP;
+ C.CCValid = SystemZ::CCMASK_ICMP;
+ C.Opcode = SystemZISD::ICMP;
// Choose the type of comparison. Equality and inequality tests can
// use either signed or unsigned comparisons. The choice also doesn't
// matter if both sign bits are known to be clear. In those cases we
// want to give the main isel code the freedom to choose whichever
// form fits best.
- if (CCMask == SystemZ::CCMASK_CMP_EQ ||
- CCMask == SystemZ::CCMASK_CMP_NE ||
- (DAG.SignBitIsZero(CmpOp0) && DAG.SignBitIsZero(CmpOp1)))
- ICmpType = SystemZICMP::Any;
- else if (IsUnsigned)
- ICmpType = SystemZICMP::UnsignedOnly;
+ if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
+ C.CCMask == SystemZ::CCMASK_CMP_NE ||
+ (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
+ C.ICmpType = SystemZICMP::Any;
+ else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
+ C.ICmpType = SystemZICMP::UnsignedOnly;
else
- ICmpType = SystemZICMP::SignedOnly;
+ C.ICmpType = SystemZICMP::SignedOnly;
+ C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
+ adjustZeroCmp(DAG, C);
+ adjustSubwordCmp(DAG, C);
+ adjustForSubtraction(DAG, C);
+ adjustForLTGFR(C);
+ adjustICmpTruncate(DAG, C);
}
- if (shouldSwapCmpOperands(CmpOp0, CmpOp1, ICmpType)) {
- std::swap(CmpOp0, CmpOp1);
- CCMask = ((CCMask & SystemZ::CCMASK_CMP_EQ) |
- (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
- (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
- (CCMask & SystemZ::CCMASK_CMP_UO));
+ if (shouldSwapCmpOperands(C)) {
+ std::swap(C.Op0, C.Op1);
+ C.CCMask = reverseCCMask(C.CCMask);
}
- adjustForTestUnderMask(DAG, Opcode, CmpOp0, CmpOp1, CCValid, CCMask,
- ICmpType);
- if (Opcode == SystemZISD::ICMP || Opcode == SystemZISD::TM)
- return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1,
- DAG.getConstant(ICmpType, MVT::i32));
- return DAG.getNode(Opcode, DL, MVT::Glue, CmpOp0, CmpOp1);
+ adjustForTestUnderMask(DAG, C);
+ return C;
+}
+
+// Emit the comparison instruction described by C.
+static SDValue emitCmp(SelectionDAG &DAG, SDLoc DL, Comparison &C) {
+ if (C.Opcode == SystemZISD::ICMP)
+ return DAG.getNode(SystemZISD::ICMP, DL, MVT::Glue, C.Op0, C.Op1,
+ DAG.getConstant(C.ICmpType, MVT::i32));
+ if (C.Opcode == SystemZISD::TM) {
+ bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
+ bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
+ return DAG.getNode(SystemZISD::TM, DL, MVT::Glue, C.Op0, C.Op1,
+ DAG.getConstant(RegisterOnly, MVT::i32));
+ }
+ return DAG.getNode(C.Opcode, DL, MVT::Glue, C.Op0, C.Op1);
}
// Implement a 32-bit *MUL_LOHI operation by extending both operands to
@@ -1486,16 +1656,11 @@ static void lowerGR128Binary(SelectionDAG &DAG, SDLoc DL, EVT VT,
Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
}
-SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
- SelectionDAG &DAG) const {
- SDValue CmpOp0 = Op.getOperand(0);
- SDValue CmpOp1 = Op.getOperand(1);
- ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
- SDLoc DL(Op);
-
- unsigned CCValid, CCMask;
- SDValue Glue = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
-
+// Return an i32 value that is 1 if the CC value produced by Glue is
+// in the mask CCMask and 0 otherwise. CC is known to have a value
+// in CCValid, so other values can be ignored.
+static SDValue emitSETCC(SelectionDAG &DAG, SDLoc DL, SDValue Glue,
+ unsigned CCValid, unsigned CCMask) {
IPMConversion Conversion = getIPMConversion(CCValid, CCMask);
SDValue Result = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, Glue);
@@ -1516,6 +1681,18 @@ SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
return Result;
}
+SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CmpOp0 = Op.getOperand(0);
+ SDValue CmpOp1 = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+ SDLoc DL(Op);
+
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ SDValue Glue = emitCmp(DAG, DL, C);
+ return emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
+}
+
SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
@@ -1524,11 +1701,33 @@ SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
SDValue Dest = Op.getOperand(4);
SDLoc DL(Op);
- unsigned CCValid, CCMask;
- SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+ SDValue Glue = emitCmp(DAG, DL, C);
return DAG.getNode(SystemZISD::BR_CCMASK, DL, Op.getValueType(),
- Chain, DAG.getConstant(CCValid, MVT::i32),
- DAG.getConstant(CCMask, MVT::i32), Dest, Flags);
+ Chain, DAG.getConstant(C.CCValid, MVT::i32),
+ DAG.getConstant(C.CCMask, MVT::i32), Dest, Glue);
+}
+
+// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
+// allowing Pos and Neg to be wider than CmpOp.
+static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
+ return (Neg.getOpcode() == ISD::SUB &&
+ Neg.getOperand(0).getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
+ Neg.getOperand(1) == Pos &&
+ (Pos == CmpOp ||
+ (Pos.getOpcode() == ISD::SIGN_EXTEND &&
+ Pos.getOperand(0) == CmpOp)));
+}
+
+// Return the absolute or negative absolute of Op; IsNegative decides which.
+static SDValue getAbsolute(SelectionDAG &DAG, SDLoc DL, SDValue Op,
+ bool IsNegative) {
+ Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
+ if (IsNegative)
+ Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
+ DAG.getConstant(0, Op.getValueType()), Op);
+ return Op;
}
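// isAbsolute and getAbsolute above recognise the usual branch-free absolute
// value written as a compare against zero plus a select of X and 0 - X.
// A trivial standalone sketch of the source-level pattern, for illustration
// only:
#include <cassert>
#include <cstdlib>

// cond = (X < 0), true value = 0 - X, false value = X  ==>  |X|
static long long absViaSelect(long long X) { return X < 0 ? 0 - X : X; }

int main() {
  long long Samples[] = {-42, -1, 0, 1, 42};
  for (long long X : Samples)
    assert(absViaSelect(X) == std::llabs(X));
  return 0;
}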
SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
@@ -1540,18 +1739,56 @@ SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
SDLoc DL(Op);
- unsigned CCValid, CCMask;
- SDValue Flags = emitCmp(TM, DAG, DL, CmpOp0, CmpOp1, CC, CCValid, CCMask);
+ Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC));
+
+ // Check for absolute and negative-absolute selections, including those
+ // where the comparison value is sign-extended (for LPGFR and LNGFR).
+ // This check supplements the one in DAGCombiner.
+ if (C.Opcode == SystemZISD::ICMP &&
+ C.CCMask != SystemZ::CCMASK_CMP_EQ &&
+ C.CCMask != SystemZ::CCMASK_CMP_NE &&
+ C.Op1.getOpcode() == ISD::Constant &&
+ cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
+ if (isAbsolute(C.Op0, TrueOp, FalseOp))
+ return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
+ if (isAbsolute(C.Op0, FalseOp, TrueOp))
+ return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
+ }
+
+ SDValue Glue = emitCmp(DAG, DL, C);
+
+ // Special case for handling -1/0 results. The shifts we use here
+ // should get optimized with the IPM conversion sequence.
+ auto *TrueC = dyn_cast<ConstantSDNode>(TrueOp);
+ auto *FalseC = dyn_cast<ConstantSDNode>(FalseOp);
+ if (TrueC && FalseC) {
+ int64_t TrueVal = TrueC->getSExtValue();
+ int64_t FalseVal = FalseC->getSExtValue();
+ if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
+ // Invert the condition if we want -1 on false.
+ if (TrueVal == 0)
+ C.CCMask ^= C.CCValid;
+ SDValue Result = emitSETCC(DAG, DL, Glue, C.CCValid, C.CCMask);
+ EVT VT = Op.getValueType();
+ // Extend the result to VT. Upper bits are ignored.
+ if (!is32Bit(VT))
+ Result = DAG.getNode(ISD::ANY_EXTEND, DL, VT, Result);
+ // Sign-extend from the low bit.
+ SDValue ShAmt = DAG.getConstant(VT.getSizeInBits() - 1, MVT::i32);
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Result, ShAmt);
+ return DAG.getNode(ISD::SRA, DL, VT, Shl, ShAmt);
+ }
+ }
SmallVector<SDValue, 5> Ops;
Ops.push_back(TrueOp);
Ops.push_back(FalseOp);
- Ops.push_back(DAG.getConstant(CCValid, MVT::i32));
- Ops.push_back(DAG.getConstant(CCMask, MVT::i32));
- Ops.push_back(Flags);
+ Ops.push_back(DAG.getConstant(C.CCValid, MVT::i32));
+ Ops.push_back(DAG.getConstant(C.CCMask, MVT::i32));
+ Ops.push_back(Glue);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
- return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
}
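// The -1/0 special case above materialises the CC test as a 0/1 value and
// then smears its low bit across the register with a left shift followed by
// an arithmetic right shift.  A standalone sketch of that final step, for
// illustration only (it assumes the usual arithmetic right shift on signed
// types, as on mainstream compilers):
#include <cassert>
#include <cstdint>

// Shift the flag's low bit into the sign position, then shift it back down,
// turning 0/1 into 0/-1.
static int64_t smearLowBit(uint64_t Flag) {
  return (int64_t)(Flag << 63) >> 63;
}

int main() {
  assert(smearLowBit(0) == 0);
  assert(smearLowBit(1) == -1);
  return 0;
}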
SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
@@ -1560,8 +1797,8 @@ SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
const GlobalValue *GV = Node->getGlobal();
int64_t Offset = Node->getOffset();
EVT PtrVT = getPointerTy();
- Reloc::Model RM = TM.getRelocationModel();
- CodeModel::Model CM = TM.getCodeModel();
+ Reloc::Model RM = DAG.getTarget().getRelocationModel();
+ CodeModel::Model CM = DAG.getTarget().getCodeModel();
SDValue Result;
if (Subtarget.isPC32DBLSymbol(GV, RM, CM)) {
@@ -1598,7 +1835,7 @@ SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
SDLoc DL(Node);
const GlobalValue *GV = Node->getGlobal();
EVT PtrVT = getPointerTy();
- TLSModel::Model model = TM.getTLSModel(GV);
+ TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
if (model != TLSModel::LocalExec)
llvm_unreachable("only local-exec TLS mode supported");
@@ -1743,7 +1980,7 @@ SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
false, false, 0);
Offset += 8;
}
- return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps, NumFields);
+ return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
@@ -1784,7 +2021,7 @@ lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
SDValue Ops[2] = { Result, Chain };
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
@@ -1826,7 +2063,7 @@ SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
}
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
@@ -1845,7 +2082,7 @@ SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
// low half first, so the results are in reverse order.
lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, SystemZISD::UMUL_LOHI64,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
@@ -1872,7 +2109,7 @@ SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
SDValue Ops[2];
lowerGR128Binary(DAG, DL, VT, SystemZ::AEXT128_64, Opcode,
Op0, Op1, Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
@@ -1890,7 +2127,7 @@ SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
else
lowerGR128Binary(DAG, DL, VT, SystemZ::ZEXT128_64, SystemZISD::UDIVREM64,
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
- return DAG.getMergeValues(Ops, 2, DL);
+ return DAG.getMergeValues(Ops, DL);
}
SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
@@ -1899,8 +2136,8 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// Get the known-zero masks for each operand.
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
APInt KnownZero[2], KnownOne[2];
- DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
- DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
+ DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
+ DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
// See if the upper 32 bits of one operand and the lower 32 bits of the
// other are known zero. They are the low and high operands respectively.
@@ -1949,12 +2186,33 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
MVT::i64, HighOp, Low32);
}
+// Op is an atomic load. Lower it into a normal volatile load.
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
+ return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
+ Node->getChain(), Node->getBasePtr(),
+ Node->getMemoryVT(), Node->getMemOperand());
+}
+
+// Op is an atomic store. Lower it into a normal volatile store followed
+// by a serialization.
+SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
+ SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
+ Node->getBasePtr(), Node->getMemoryVT(),
+ Node->getMemOperand());
+ return SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op), MVT::Other,
+ Chain), 0);
+}
+
// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
-SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
- SelectionDAG &DAG,
- unsigned Opcode) const {
- AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
+ SelectionDAG &DAG,
+ unsigned Opcode) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
// 32-bit operations need no code outside the main loop.
EVT NarrowVT = Node->getMemoryVT();
@@ -1972,7 +2230,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
// Convert atomic subtracts of constants into additions.
if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
- if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Src2)) {
+ if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
Opcode = SystemZISD::ATOMIC_LOADW_ADD;
Src2 = DAG.getConstant(-Const->getSExtValue(), Src2.getValueType());
}
@@ -2010,7 +2268,6 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
DAG.getConstant(BitSize, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
- array_lengthof(Ops),
NarrowVT, MMO);
// Rotate the result of the final CS so that the field is in the lower
@@ -2020,14 +2277,52 @@ SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
- return DAG.getMergeValues(RetOps, 2, DL);
+ return DAG.getMergeValues(RetOps, DL);
+}
+
+// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
+// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
+// operations into additions.
+SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
+ SelectionDAG &DAG) const {
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
+ EVT MemVT = Node->getMemoryVT();
+ if (MemVT == MVT::i32 || MemVT == MVT::i64) {
+ // A full-width operation.
+ assert(Op.getValueType() == MemVT && "Mismatched VTs");
+ SDValue Src2 = Node->getVal();
+ SDValue NegSrc2;
+ SDLoc DL(Src2);
+
+ if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
+ // Use an addition if the operand is constant and either LAA(G) is
+ // available or the negative value is in the range of A(G)FHI.
+ int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
+ if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
+ NegSrc2 = DAG.getConstant(Value, MemVT);
+ } else if (Subtarget.hasInterlockedAccess1())
+ // Use LAA(G) if available.
+ NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, MemVT),
+ Src2);
+
+ if (NegSrc2.getNode())
+ return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
+ Node->getChain(), Node->getBasePtr(), NegSrc2,
+ Node->getMemOperand(), Node->getOrdering(),
+ Node->getSynchScope());
+
+ // Use the node as-is.
+ return Op;
+ }
+
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
}
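// The constant path above uses the identity fetch_sub(C) == fetch_add(-C),
// so that either an add-immediate or (with interlocked-access facility 1) an
// LAA(G)-style add can be used.  A minimal standalone illustration with
// std::atomic, not part of this patch:
#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<int64_t> A(100), B(100);
  const int64_t C = 7;
  A.fetch_sub(C);   // atomic subtraction of a constant ...
  B.fetch_add(-C);  // ... behaves like an atomic addition of its negation
  assert(A.load() == B.load() && A.load() == 93);
  return 0;
}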
// Node is an 8- or 16-bit ATOMIC_CMP_SWAP operation. Lower the first two
// into a fullword ATOMIC_CMP_SWAPW operation.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
SelectionDAG &DAG) const {
- AtomicSDNode *Node = cast<AtomicSDNode>(Op.getNode());
+ auto *Node = cast<AtomicSDNode>(Op.getNode());
// We have native support for 32-bit compare and swap.
EVT NarrowVT = Node->getMemoryVT();
@@ -2064,8 +2359,7 @@ SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
NegBitShift, DAG.getConstant(BitSize, WideVT) };
SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
- VTList, Ops, array_lengthof(Ops),
- NarrowVT, MMO);
+ VTList, Ops, NarrowVT, MMO);
return AtomicOp;
}
@@ -2094,14 +2388,14 @@ SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
- MemIntrinsicSDNode *Node = cast<MemIntrinsicSDNode>(Op.getNode());
+ auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
SDValue Ops[] = {
Op.getOperand(0),
DAG.getConstant(Code, MVT::i32),
Op.getOperand(1)
};
return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, SDLoc(Op),
- Node->getVTList(), Ops, array_lengthof(Ops),
+ Node->getVTList(), Ops,
Node->getMemoryVT(), Node->getMemOperand());
}
@@ -2143,27 +2437,31 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
case ISD::OR:
return lowerOR(Op, DAG);
case ISD::ATOMIC_SWAP:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
+ case ISD::ATOMIC_STORE:
+ return lowerATOMIC_STORE(Op, DAG);
+ case ISD::ATOMIC_LOAD:
+ return lowerATOMIC_LOAD(Op, DAG);
case ISD::ATOMIC_LOAD_ADD:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
case ISD::ATOMIC_LOAD_SUB:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
+ return lowerATOMIC_LOAD_SUB(Op, DAG);
case ISD::ATOMIC_LOAD_AND:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
case ISD::ATOMIC_LOAD_OR:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
case ISD::ATOMIC_LOAD_XOR:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
case ISD::ATOMIC_LOAD_NAND:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
case ISD::ATOMIC_LOAD_MIN:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
case ISD::ATOMIC_LOAD_MAX:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
case ISD::ATOMIC_LOAD_UMIN:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
case ISD::ATOMIC_LOAD_UMAX:
- return lowerATOMIC_LOAD(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
+ return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
case ISD::ATOMIC_CMP_SWAP:
return lowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::STACKSAVE:
@@ -2185,6 +2483,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(SIBCALL);
OPCODE(PCREL_WRAPPER);
OPCODE(PCREL_OFFSET);
+ OPCODE(IABS);
OPCODE(ICMP);
OPCODE(FCMP);
OPCODE(TM);
@@ -2210,6 +2509,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(STPCPY);
OPCODE(SEARCH_STRING);
OPCODE(IPM);
+ OPCODE(SERIALIZE);
OPCODE(ATOMIC_SWAPW);
OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
@@ -2224,10 +2524,43 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
OPCODE(ATOMIC_CMP_SWAPW);
OPCODE(PREFETCH);
}
- return NULL;
+ return nullptr;
#undef OPCODE
}
+SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+ unsigned Opcode = N->getOpcode();
+ if (Opcode == ISD::SIGN_EXTEND) {
+ // Convert (sext (ashr (shl X, C1), C2)) to
+    // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
+ // cheap as narrower ones.
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+ if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
+ auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ SDValue Inner = N0.getOperand(0);
+ if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
+ if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
+ unsigned Extra = (VT.getSizeInBits() -
+ N0.getValueType().getSizeInBits());
+ unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
+ unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
+ EVT ShiftVT = N0.getOperand(1).getValueType();
+ SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
+ Inner.getOperand(0));
+ SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
+ DAG.getConstant(NewShlAmt, ShiftVT));
+ return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
+ DAG.getConstant(NewSraAmt, ShiftVT));
+ }
+ }
+ }
+ }
+ return SDValue();
+}
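// The SIGN_EXTEND combine above widens the value first and adds the width
// difference (32 here) to both shift amounts.  A standalone check of that
// equivalence for the i32 -> i64 case with equal shift amounts (the pattern
// produced for sext of a truncated value), for illustration only; it assumes
// two's-complement narrowing and arithmetic right shifts, as on mainstream
// compilers:
#include <cassert>
#include <cstdint>

// Original form: sign-extend the 32-bit result of (sra (shl X, C1), C2).
static int64_t narrowForm(int32_t X, unsigned C1, unsigned C2) {
  int32_t Shifted = (int32_t)((uint32_t)X << C1) >> C2;
  return (int64_t)Shifted;
}

// Combined form: any-extend first, then shift by C1 + 32 and C2 + 32.
static int64_t wideForm(int32_t X, unsigned C1, unsigned C2) {
  uint64_t Ext = (uint32_t)X;  // upper 32 bits are "don't care"
  return (int64_t)(Ext << (C1 + 32)) >> (C2 + 32);
}

int main() {
  int32_t Samples[] = {0, 1, -1, 0x12345678, (int32_t)0x80000001};
  unsigned Shifts[] = {8, 16, 24};
  for (int32_t X : Samples)
    for (unsigned C : Shifts)
      assert(narrowForm(X, C, C) == wideForm(X, C, C));
  return 0;
}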
+
//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//
@@ -2236,7 +2569,7 @@ const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
static MachineBasicBlock *emitBlockAfter(MachineBasicBlock *MBB) {
MachineFunction &MF = *MBB->getParent();
MachineBasicBlock *NewMBB = MF.CreateMachineBasicBlock(MBB->getBasicBlock());
- MF.insert(llvm::next(MachineFunction::iterator(MBB)), NewMBB);
+ MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
return NewMBB;
}
@@ -2246,8 +2579,7 @@ static MachineBasicBlock *splitBlockAfter(MachineInstr *MI,
MachineBasicBlock *MBB) {
MachineBasicBlock *NewMBB = emitBlockAfter(MBB);
NewMBB->splice(NewMBB->begin(), MBB,
- llvm::next(MachineBasicBlock::iterator(MI)),
- MBB->end());
+ std::next(MachineBasicBlock::iterator(MI)), MBB->end());
NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
return NewMBB;
}
@@ -2281,7 +2613,8 @@ static unsigned forceReg(MachineInstr *MI, MachineOperand &Base,
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
+ const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
+ MBB->getParent()->getTarget().getInstrInfo());
unsigned DestReg = MI->getOperand(0).getReg();
unsigned TrueReg = MI->getOperand(1).getReg();
@@ -2329,7 +2662,8 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned StoreOpcode, unsigned STOCOpcode,
bool Invert) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
+ const SystemZInstrInfo *TII = static_cast<const SystemZInstrInfo *>(
+ MBB->getParent()->getTarget().getInstrInfo());
unsigned SrcReg = MI->getOperand(0).getReg();
MachineOperand Base = MI->getOperand(1);
@@ -2344,7 +2678,7 @@ SystemZTargetLowering::emitCondStore(MachineInstr *MI,
// Use STOCOpcode if possible. We could use different store patterns in
// order to avoid matching the index register, but the performance trade-offs
// might be more complicated in that case.
- if (STOCOpcode && !IndexReg && TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
if (Invert)
CCMask ^= CCValid;
BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
@@ -2396,8 +2730,9 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
unsigned BinOpcode,
unsigned BitSize,
bool Invert) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
@@ -2519,8 +2854,9 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
unsigned CompareOpcode,
unsigned KeepOldMask,
unsigned BitSize) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
bool IsSubWord = (BitSize < 32);
@@ -2630,8 +2966,9 @@ SystemZTargetLowering::emitAtomicLoadMinMax(MachineInstr *MI,
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr *MI,
MachineBasicBlock *MBB) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
// Extract the operands. Base can be a register or a frame index.
@@ -2746,8 +3083,9 @@ MachineBasicBlock *
SystemZTargetLowering::emitExt128(MachineInstr *MI,
MachineBasicBlock *MBB,
bool ClearEven, unsigned SubReg) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -2777,8 +3115,9 @@ MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
@@ -2791,7 +3130,7 @@ SystemZTargetLowering::emitMemMemWrapper(MachineInstr *MI,
// When generating more than one CLC, all but the last will need to
// branch to the end when a difference is found.
MachineBasicBlock *EndMBB = (Length > 256 && Opcode == SystemZ::CLC ?
- splitBlockAfter(MI, MBB) : 0);
+ splitBlockAfter(MI, MBB) : nullptr);
// Check for the loop form, in which operand 5 is the trip count.
if (MI->getNumExplicitOperands() > 5) {
@@ -2946,8 +3285,9 @@ MachineBasicBlock *
SystemZTargetLowering::emitStringWrapper(MachineInstr *MI,
MachineBasicBlock *MBB,
unsigned Opcode) const {
- const SystemZInstrInfo *TII = TM.getInstrInfo();
MachineFunction &MF = *MBB->getParent();
+ const SystemZInstrInfo *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
MachineRegisterInfo &MRI = MF.getRegInfo();
DebugLoc DL = MI->getDebugLoc();
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index c6dcca6982a6..e21b0501933f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -22,232 +22,233 @@
namespace llvm {
namespace SystemZISD {
- enum {
- FIRST_NUMBER = ISD::BUILTIN_OP_END,
-
- // Return with a flag operand. Operand 0 is the chain operand.
- RET_FLAG,
-
- // Calls a function. Operand 0 is the chain operand and operand 1
- // is the target address. The arguments start at operand 2.
- // There is an optional glue operand at the end.
- CALL,
- SIBCALL,
-
- // Wraps a TargetGlobalAddress that should be loaded using PC-relative
- // accesses (LARL). Operand 0 is the address.
- PCREL_WRAPPER,
-
- // Used in cases where an offset is applied to a TargetGlobalAddress.
- // Operand 0 is the full TargetGlobalAddress and operand 1 is a
- // PCREL_WRAPPER for an anchor point. This is used so that we can
- // cheaply refer to either the full address or the anchor point
- // as a register base.
- PCREL_OFFSET,
-
- // Integer comparisons. There are three operands: the two values
- // to compare, and an integer of type SystemZICMP.
- ICMP,
-
- // Floating-point comparisons. The two operands are the values to compare.
- FCMP,
-
- // Test under mask. The first operand is ANDed with the second operand
- // and the condition codes are set on the result. The third operand is
- // a boolean that is true if the condition codes need to distinguish
- // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
- // register forms do but the memory forms don't).
- TM,
-
- // Branches if a condition is true. Operand 0 is the chain operand;
- // operand 1 is the 4-bit condition-code mask, with bit N in
- // big-endian order meaning "branch if CC=N"; operand 2 is the
- // target block and operand 3 is the flag operand.
- BR_CCMASK,
-
- // Selects between operand 0 and operand 1. Operand 2 is the
- // mask of condition-code values for which operand 0 should be
- // chosen over operand 1; it has the same form as BR_CCMASK.
- // Operand 3 is the flag operand.
- SELECT_CCMASK,
-
- // Evaluates to the gap between the stack pointer and the
- // base of the dynamically-allocatable area.
- ADJDYNALLOC,
-
- // Extracts the value of a 32-bit access register. Operand 0 is
- // the number of the register.
- EXTRACT_ACCESS,
-
- // Wrappers around the ISD opcodes of the same name. The output and
- // first input operands are GR128s. The trailing numbers are the
- // widths of the second operand in bits.
- UMUL_LOHI64,
- SDIVREM32,
- SDIVREM64,
- UDIVREM32,
- UDIVREM64,
-
- // Use a series of MVCs to copy bytes from one memory location to another.
- // The operands are:
- // - the target address
- // - the source address
- // - the constant length
- //
- // This isn't a memory opcode because we'd need to attach two
- // MachineMemOperands rather than one.
- MVC,
-
- // Like MVC, but implemented as a loop that handles X*256 bytes
- // followed by straight-line code to handle the rest (if any).
- // The value of X is passed as an additional operand.
- MVC_LOOP,
-
- // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
- NC,
- NC_LOOP,
- OC,
- OC_LOOP,
- XC,
- XC_LOOP,
-
- // Use CLC to compare two blocks of memory, with the same comments
- // as for MVC and MVC_LOOP.
- CLC,
- CLC_LOOP,
-
- // Use an MVST-based sequence to implement stpcpy().
- STPCPY,
-
- // Use a CLST-based sequence to implement strcmp(). The two input operands
- // are the addresses of the strings to compare.
- STRCMP,
-
- // Use an SRST-based sequence to search a block of memory. The first
- // operand is the end address, the second is the start, and the third
- // is the character to search for. CC is set to 1 on success and 2
- // on failure.
- SEARCH_STRING,
-
- // Store the CC value in bits 29 and 28 of an integer.
- IPM,
-
- // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
- // ATOMIC_LOAD_<op>.
- //
- // Operand 0: the address of the containing 32-bit-aligned field
- // Operand 1: the second operand of <op>, in the high bits of an i32
- // for everything except ATOMIC_SWAPW
- // Operand 2: how many bits to rotate the i32 left to bring the first
- // operand into the high bits
- // Operand 3: the negative of operand 2, for rotating the other way
- // Operand 4: the width of the field in bits (8 or 16)
- ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
- ATOMIC_LOADW_ADD,
- ATOMIC_LOADW_SUB,
- ATOMIC_LOADW_AND,
- ATOMIC_LOADW_OR,
- ATOMIC_LOADW_XOR,
- ATOMIC_LOADW_NAND,
- ATOMIC_LOADW_MIN,
- ATOMIC_LOADW_MAX,
- ATOMIC_LOADW_UMIN,
- ATOMIC_LOADW_UMAX,
-
- // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
- //
- // Operand 0: the address of the containing 32-bit-aligned field
- // Operand 1: the compare value, in the low bits of an i32
- // Operand 2: the swap value, in the low bits of an i32
- // Operand 3: how many bits to rotate the i32 left to bring the first
- // operand into the high bits
- // Operand 4: the negative of operand 2, for rotating the other way
- // Operand 5: the width of the field in bits (8 or 16)
- ATOMIC_CMP_SWAPW,
-
- // Prefetch from the second operand using the 4-bit control code in
- // the first operand. The code is 1 for a load prefetch and 2 for
- // a store prefetch.
- PREFETCH
- };
-
- // Return true if OPCODE is some kind of PC-relative address.
- inline bool isPCREL(unsigned Opcode) {
- return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
- }
+enum {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+ // Return with a flag operand. Operand 0 is the chain operand.
+ RET_FLAG,
+
+ // Calls a function. Operand 0 is the chain operand and operand 1
+ // is the target address. The arguments start at operand 2.
+ // There is an optional glue operand at the end.
+ CALL,
+ SIBCALL,
+
+ // Wraps a TargetGlobalAddress that should be loaded using PC-relative
+ // accesses (LARL). Operand 0 is the address.
+ PCREL_WRAPPER,
+
+ // Used in cases where an offset is applied to a TargetGlobalAddress.
+ // Operand 0 is the full TargetGlobalAddress and operand 1 is a
+ // PCREL_WRAPPER for an anchor point. This is used so that we can
+ // cheaply refer to either the full address or the anchor point
+ // as a register base.
+ PCREL_OFFSET,
+
+ // Integer absolute.
+ IABS,
+
+ // Integer comparisons. There are three operands: the two values
+ // to compare, and an integer of type SystemZICMP.
+ ICMP,
+
+ // Floating-point comparisons. The two operands are the values to compare.
+ FCMP,
+
+ // Test under mask. The first operand is ANDed with the second operand
+ // and the condition codes are set on the result. The third operand is
+ // a boolean that is true if the condition codes need to distinguish
+ // between CCMASK_TM_MIXED_MSB_0 and CCMASK_TM_MIXED_MSB_1 (which the
+ // register forms do but the memory forms don't).
+ TM,
+
+ // Branches if a condition is true. Operand 0 is the chain operand;
+ // operand 1 is the 4-bit condition-code mask, with bit N in
+ // big-endian order meaning "branch if CC=N"; operand 2 is the
+ // target block and operand 3 is the flag operand.
+ BR_CCMASK,
+
+ // Selects between operand 0 and operand 1. Operand 2 is the
+ // mask of condition-code values for which operand 0 should be
+ // chosen over operand 1; it has the same form as BR_CCMASK.
+ // Operand 3 is the flag operand.
+ SELECT_CCMASK,
+
+ // Evaluates to the gap between the stack pointer and the
+ // base of the dynamically-allocatable area.
+ ADJDYNALLOC,
+
+ // Extracts the value of a 32-bit access register. Operand 0 is
+ // the number of the register.
+ EXTRACT_ACCESS,
+
+ // Wrappers around the ISD opcodes of the same name. The output and
+ // first input operands are GR128s. The trailing numbers are the
+ // widths of the second operand in bits.
+ UMUL_LOHI64,
+ SDIVREM32,
+ SDIVREM64,
+ UDIVREM32,
+ UDIVREM64,
+
+ // Use a series of MVCs to copy bytes from one memory location to another.
+ // The operands are:
+ // - the target address
+ // - the source address
+ // - the constant length
+ //
+ // This isn't a memory opcode because we'd need to attach two
+ // MachineMemOperands rather than one.
+ MVC,
+
+ // Like MVC, but implemented as a loop that handles X*256 bytes
+ // followed by straight-line code to handle the rest (if any).
+ // The value of X is passed as an additional operand.
+ MVC_LOOP,
+
+ // Similar to MVC and MVC_LOOP, but for logic operations (AND, OR, XOR).
+ NC,
+ NC_LOOP,
+ OC,
+ OC_LOOP,
+ XC,
+ XC_LOOP,
+
+ // Use CLC to compare two blocks of memory, with the same comments
+ // as for MVC and MVC_LOOP.
+ CLC,
+ CLC_LOOP,
+
+ // Use an MVST-based sequence to implement stpcpy().
+ STPCPY,
+
+ // Use a CLST-based sequence to implement strcmp(). The two input operands
+ // are the addresses of the strings to compare.
+ STRCMP,
+
+ // Use an SRST-based sequence to search a block of memory. The first
+ // operand is the end address, the second is the start, and the third
+ // is the character to search for. CC is set to 1 on success and 2
+ // on failure.
+ SEARCH_STRING,
+
+ // Store the CC value in bits 29 and 28 of an integer.
+ IPM,
+
+ // Perform a serialization operation. (BCR 15,0 or BCR 14,0.)
+ SERIALIZE,
+
+ // Wrappers around the inner loop of an 8- or 16-bit ATOMIC_SWAP or
+ // ATOMIC_LOAD_<op>.
+ //
+ // Operand 0: the address of the containing 32-bit-aligned field
+ // Operand 1: the second operand of <op>, in the high bits of an i32
+ // for everything except ATOMIC_SWAPW
+ // Operand 2: how many bits to rotate the i32 left to bring the first
+ // operand into the high bits
+ // Operand 3: the negative of operand 2, for rotating the other way
+ // Operand 4: the width of the field in bits (8 or 16)
+ ATOMIC_SWAPW = ISD::FIRST_TARGET_MEMORY_OPCODE,
+ ATOMIC_LOADW_ADD,
+ ATOMIC_LOADW_SUB,
+ ATOMIC_LOADW_AND,
+ ATOMIC_LOADW_OR,
+ ATOMIC_LOADW_XOR,
+ ATOMIC_LOADW_NAND,
+ ATOMIC_LOADW_MIN,
+ ATOMIC_LOADW_MAX,
+ ATOMIC_LOADW_UMIN,
+ ATOMIC_LOADW_UMAX,
+
+ // A wrapper around the inner loop of an ATOMIC_CMP_SWAP.
+ //
+ // Operand 0: the address of the containing 32-bit-aligned field
+ // Operand 1: the compare value, in the low bits of an i32
+ // Operand 2: the swap value, in the low bits of an i32
+ // Operand 3: how many bits to rotate the i32 left to bring the first
+ // operand into the high bits
+  // Operand 4: the negative of operand 3, for rotating the other way
+ // Operand 5: the width of the field in bits (8 or 16)
+ ATOMIC_CMP_SWAPW,
+
+ // Prefetch from the second operand using the 4-bit control code in
+ // the first operand. The code is 1 for a load prefetch and 2 for
+ // a store prefetch.
+ PREFETCH
+};
+
+// Return true if OPCODE is some kind of PC-relative address.
+inline bool isPCREL(unsigned Opcode) {
+ return Opcode == PCREL_WRAPPER || Opcode == PCREL_OFFSET;
}
+} // end namespace SystemZISD
namespace SystemZICMP {
- // Describes whether an integer comparison needs to be signed or unsigned,
- // or whether either type is OK.
- enum {
- Any,
- UnsignedOnly,
- SignedOnly
- };
-}
+// Describes whether an integer comparison needs to be signed or unsigned,
+// or whether either type is OK.
+enum {
+ Any,
+ UnsignedOnly,
+ SignedOnly
+};
+} // end namespace SystemZICMP
class SystemZSubtarget;
class SystemZTargetMachine;
class SystemZTargetLowering : public TargetLowering {
public:
- explicit SystemZTargetLowering(SystemZTargetMachine &TM);
+ explicit SystemZTargetLowering(const TargetMachine &TM);
// Override TargetLowering.
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const LLVM_OVERRIDE {
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override {
return MVT::i32;
}
- virtual EVT getSetCCResultType(LLVMContext &, EVT) const LLVM_OVERRIDE;
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const LLVM_OVERRIDE;
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const LLVM_OVERRIDE;
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const
- LLVM_OVERRIDE;
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
- LLVM_OVERRIDE;
- virtual bool isTruncateFree(Type *, Type *) const LLVM_OVERRIDE;
- virtual bool isTruncateFree(EVT, EVT) const LLVM_OVERRIDE;
- virtual const char *getTargetNodeName(unsigned Opcode) const LLVM_OVERRIDE;
- virtual std::pair<unsigned, const TargetRegisterClass *>
+ EVT getSetCCResultType(LLVMContext &, EVT) const override;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+ bool *Fast) const override;
+ bool isTruncateFree(Type *, Type *) const override;
+ bool isTruncateFree(EVT, EVT) const override;
+ const char *getTargetNodeName(unsigned Opcode) const override;
+ std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const LLVM_OVERRIDE;
- virtual TargetLowering::ConstraintType
- getConstraintType(const std::string &Constraint) const LLVM_OVERRIDE;
- virtual TargetLowering::ConstraintWeight
+ MVT VT) const override;
+ TargetLowering::ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
+ TargetLowering::ConstraintWeight
getSingleConstraintMatchWeight(AsmOperandInfo &info,
- const char *constraint) const LLVM_OVERRIDE;
- virtual void
- LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const LLVM_OVERRIDE;
- virtual MachineBasicBlock *
- EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *BB) const LLVM_OVERRIDE;
- virtual SDValue LowerOperation(SDValue Op,
- SelectionDAG &DAG) const LLVM_OVERRIDE;
- virtual bool allowTruncateForTailCall(Type *, Type *) const LLVM_OVERRIDE;
- virtual bool mayBeEmittedAsTailCall(CallInst *CI) const LLVM_OVERRIDE;
- virtual SDValue
- LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- SDLoc DL, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
- virtual SDValue
- LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const LLVM_OVERRIDE;
-
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool IsVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc DL, SelectionDAG &DAG) const LLVM_OVERRIDE;
+ const char *constraint) const override;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+ MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const
+ override;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ bool allowTruncateForTailCall(Type *, Type *) const override;
+ bool mayBeEmittedAsTailCall(CallInst *CI) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ SDLoc DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc DL, SelectionDAG &DAG) const override;
+ SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
+ SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
private:
const SystemZSubtarget &Subtarget;
- const SystemZTargetMachine &TM;
// Implement LowerOperation for individual opcodes.
SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
@@ -270,9 +271,13 @@ private:
SDValue lowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerOR(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG,
- unsigned Opcode) const;
+ SDValue lowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerATOMIC_LOAD_OP(SDValue Op, SelectionDAG &DAG,
+ unsigned Opcode) const;
+ SDValue lowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerLOAD_SEQUENCE_POINT(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
index fb699b9ab8d7..84196e94a5a6 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrBuilder.h
@@ -43,6 +43,6 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI) {
return MIB.addFrameIndex(FI).addImm(Offset).addReg(0).addMemOperand(MMO);
}
-} // End llvm namespace
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
index 60800460fca7..e8841e131324 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFP.td
@@ -46,9 +46,9 @@ let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0xF in {
defm LTDBR : LoadAndTestRRE<"ltdb", 0xB312, FP64>;
defm LTXBR : LoadAndTestRRE<"ltxb", 0xB342, FP128>;
}
-def : CompareZeroFP<LTEBRCompare, FP32>;
-def : CompareZeroFP<LTDBRCompare, FP64>;
-def : CompareZeroFP<LTXBRCompare, FP128>;
+defm : CompareZeroFP<LTEBRCompare, FP32>;
+defm : CompareZeroFP<LTDBRCompare, FP64>;
+defm : CompareZeroFP<LTXBRCompare, FP128>;
// Moves between 64-bit integer and floating-point registers.
def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>;
@@ -133,6 +133,13 @@ def LEDBR : UnaryRRE<"ledb", 0xB344, fround, FP32, FP64>;
def LEXBR : UnaryRRE<"lexb", 0xB346, null_frag, FP128, FP128>;
def LDXBR : UnaryRRE<"ldxb", 0xB345, null_frag, FP128, FP128>;
+def LEDBRA : UnaryRRF4<"ledbra", 0xB344, FP32, FP64>,
+ Requires<[FeatureFPExtension]>;
+def LEXBRA : UnaryRRF4<"lexbra", 0xB346, FP128, FP128>,
+ Requires<[FeatureFPExtension]>;
+def LDXBRA : UnaryRRF4<"ldxbra", 0xB345, FP128, FP128>,
+ Requires<[FeatureFPExtension]>;
+
def : Pat<(f32 (fround FP128:$src)),
(EXTRACT_SUBREG (LEXBR FP128:$src), subreg_hh32)>;
def : Pat<(f64 (fround FP128:$src)),
@@ -157,6 +164,25 @@ def CEGBR : UnaryRRE<"cegb", 0xB3A4, sint_to_fp, FP32, GR64>;
def CDGBR : UnaryRRE<"cdgb", 0xB3A5, sint_to_fp, FP64, GR64>;
def CXGBR : UnaryRRE<"cxgb", 0xB3A6, sint_to_fp, FP128, GR64>;
+// Convert an unsigned integer register value to a floating-point one.
+let Predicates = [FeatureFPExtension] in {
+ def CELFBR : UnaryRRF4<"celfbr", 0xB390, FP32, GR32>;
+ def CDLFBR : UnaryRRF4<"cdlfbr", 0xB391, FP64, GR32>;
+ def CXLFBR : UnaryRRF4<"cxlfbr", 0xB392, FP128, GR32>;
+
+ def CELGBR : UnaryRRF4<"celgbr", 0xB3A0, FP32, GR64>;
+ def CDLGBR : UnaryRRF4<"cdlgbr", 0xB3A1, FP64, GR64>;
+ def CXLGBR : UnaryRRF4<"cxlgbr", 0xB3A2, FP128, GR64>;
+
+ def : Pat<(f32 (uint_to_fp GR32:$src)), (CELFBR 0, GR32:$src, 0)>;
+ def : Pat<(f64 (uint_to_fp GR32:$src)), (CDLFBR 0, GR32:$src, 0)>;
+ def : Pat<(f128 (uint_to_fp GR32:$src)), (CXLFBR 0, GR32:$src, 0)>;
+
+ def : Pat<(f32 (uint_to_fp GR64:$src)), (CELGBR 0, GR64:$src, 0)>;
+ def : Pat<(f64 (uint_to_fp GR64:$src)), (CDLGBR 0, GR64:$src, 0)>;
+ def : Pat<(f128 (uint_to_fp GR64:$src)), (CXLGBR 0, GR64:$src, 0)>;
+}
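The uint_to_fp patterns above wire unsigned-to-FP conversions directly to the new CxLyBR instructions, with both modifier operands fixed at 0 (use the current rounding mode). A small illustrative C++ sketch, not part of this change, of source code that would exercise the CDLFBR pattern when FeatureFPExtension (z196 and later) is available; the assembly in the comment is a schematic assumption, not output captured from this diff:

    // Assumes a target CPU with the floating-point extension facility.
    double toDouble(unsigned x) {
      return static_cast<double>(x);   // selected roughly as: cdlfbr %f0, 0, %r2, 0
    }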
+
// Convert a floating-point register value to a signed integer value,
// with the second operand (modifier M3) specifying the rounding mode.
let Defs = [CC] in {
@@ -178,6 +204,28 @@ def : Pat<(i64 (fp_to_sint FP32:$src)), (CGEBR 5, FP32:$src)>;
def : Pat<(i64 (fp_to_sint FP64:$src)), (CGDBR 5, FP64:$src)>;
def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>;
+// Convert a floating-point register value to an unsigned integer value.
+let Predicates = [FeatureFPExtension] in {
+ let Defs = [CC] in {
+ def CLFEBR : UnaryRRF4<"clfebr", 0xB39C, GR32, FP32>;
+ def CLFDBR : UnaryRRF4<"clfdbr", 0xB39D, GR32, FP64>;
+ def CLFXBR : UnaryRRF4<"clfxbr", 0xB39E, GR32, FP128>;
+
+ def CLGEBR : UnaryRRF4<"clgebr", 0xB3AC, GR64, FP32>;
+ def CLGDBR : UnaryRRF4<"clgdbr", 0xB3AD, GR64, FP64>;
+ def CLGXBR : UnaryRRF4<"clgxbr", 0xB3AE, GR64, FP128>;
+ }
+
+ def : Pat<(i32 (fp_to_uint FP32:$src)), (CLFEBR 5, FP32:$src, 0)>;
+ def : Pat<(i32 (fp_to_uint FP64:$src)), (CLFDBR 5, FP64:$src, 0)>;
+ def : Pat<(i32 (fp_to_uint FP128:$src)), (CLFXBR 5, FP128:$src, 0)>;
+
+ def : Pat<(i64 (fp_to_uint FP32:$src)), (CLGEBR 5, FP32:$src, 0)>;
+ def : Pat<(i64 (fp_to_uint FP64:$src)), (CLGDBR 5, FP64:$src, 0)>;
+ def : Pat<(i64 (fp_to_uint FP128:$src)), (CLGXBR 5, FP128:$src, 0)>;
+}
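The fp_to_uint patterns hard-code rounding mask 5 (round toward zero), which matches the truncating behaviour of a C or C++ cast. Illustrative sketch only; the instruction named in the comment is an assumption about selection:

    unsigned toUInt(double d) {
      return static_cast<unsigned>(d);   // roughly: clfdbr %r2, 5, %f0, 0
    }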
+
+
//===----------------------------------------------------------------------===//
// Unary arithmetic
//===----------------------------------------------------------------------===//
@@ -217,15 +265,6 @@ def FIEBR : UnaryRRF<"fieb", 0xB357, FP32, FP32>;
def FIDBR : UnaryRRF<"fidb", 0xB35F, FP64, FP64>;
def FIXBR : UnaryRRF<"fixb", 0xB347, FP128, FP128>;
-// Extended forms of the previous three instructions. M4 can be set to 4
-// to suppress detection of inexact conditions.
-def FIEBRA : UnaryRRF4<"fiebra", 0xB357, FP32, FP32>,
- Requires<[FeatureFPExtension]>;
-def FIDBRA : UnaryRRF4<"fidbra", 0xB35F, FP64, FP64>,
- Requires<[FeatureFPExtension]>;
-def FIXBRA : UnaryRRF4<"fixbra", 0xB347, FP128, FP128>,
- Requires<[FeatureFPExtension]>;
-
// frint rounds according to the current mode (modifier 0) and detects
// inexact conditions.
def : Pat<(frint FP32:$src), (FIEBR 0, FP32:$src)>;
@@ -233,6 +272,12 @@ def : Pat<(frint FP64:$src), (FIDBR 0, FP64:$src)>;
def : Pat<(frint FP128:$src), (FIXBR 0, FP128:$src)>;
let Predicates = [FeatureFPExtension] in {
+ // Extended forms of the FIxBR instructions. M4 can be set to 4
+ // to suppress detection of inexact conditions.
+ def FIEBRA : UnaryRRF4<"fiebra", 0xB357, FP32, FP32>;
+ def FIDBRA : UnaryRRF4<"fidbra", 0xB35F, FP64, FP64>;
+ def FIXBRA : UnaryRRF4<"fixbra", 0xB347, FP128, FP128>;
+
// fnearbyint is like frint but does not detect inexact conditions.
def : Pat<(fnearbyint FP32:$src), (FIEBRA 0, FP32:$src, 4)>;
def : Pat<(fnearbyint FP64:$src), (FIDBRA 0, FP64:$src, 4)>;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
index a8efe165e36f..9f59a1c8e7e3 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -511,30 +511,24 @@ class InstSS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
// to store. Other stored registers are added as implicit uses.
//
// Unary:
-// One register output operand and one input operand. The input
-// operand may be a register, immediate or memory.
+// One register output operand and one input operand.
//
// Binary:
-// One register output operand and two input operands. The first
-// input operand is always a register and he second may be a register,
-// immediate or memory.
-//
-// Shift:
-// One register output operand and two input operands. The first
-// input operand is a register and the second has the same form as
-// an address (although it isn't actually used to address memory).
+// One register output operand and two input operands.
//
// Compare:
-// Two input operands. The first operand is always a register,
-// the second may be a register, immediate or memory.
+// Two input operands and an implicit CC output operand.
//
// Ternary:
-// One register output operand and three register input operands.
+// One register output operand and three input operands.
+//
+// LoadAndOp:
+// One output operand and two input operands, one of which is an address.
+// The instruction both reads from and writes to the address.
//
// CmpSwap:
-// One output operand and three input operands. The first two
-// operands are registers and the third is an address. The instruction
-// both reads from and writes to the address.
+// One output operand and three input operands, one of which is an address.
+// The instruction both reads from and writes to the address.
//
// RotateSelect:
// One output operand and five input operands. The first two operands
@@ -687,7 +681,7 @@ class CondStoreRSY<string mnemonic, bits<16> opcode,
class AsmCondStoreRSY<string mnemonic, bits<16> opcode,
RegisterOperand cls, bits<5> bytes,
AddressingMode mode = bdaddr20only>
- : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, uimm8zx4:$R3),
+ : InstRSY<opcode, (outs), (ins cls:$R1, mode:$BD2, imm32zx4:$R3),
mnemonic#"\t$R1, $BD2, $R3", []>,
Requires<[FeatureLoadStoreOnCond]> {
let mayStore = 1;
@@ -726,7 +720,7 @@ class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2),
+ : InstRRF<opcode, (outs cls1:$R1), (ins imm32zx4:$R3, cls2:$R2),
mnemonic#"r\t$R1, $R3, $R2", []> {
let OpKey = mnemonic ## cls1;
let OpType = "reg";
@@ -735,7 +729,7 @@ class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
class UnaryRRF4<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2, uimm8zx4:$R4),
+ : InstRRF<opcode, (outs cls1:$R1), (ins imm32zx4:$R3, cls2:$R2, imm32zx4:$R4),
mnemonic#"\t$R1, $R3, $R2, $R4", []>;
// These instructions are generated by if conversion. The old value of R1
@@ -753,7 +747,7 @@ class CondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
// mask is the third operand rather than being part of the mnemonic.
class AsmCondUnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
- : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, uimm8zx4:$R3),
+ : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2, imm32zx4:$R3),
mnemonic#"r\t$R1, $R2, $R3", []>,
Requires<[FeatureLoadStoreOnCond]> {
let Constraints = "$R1 = $R1src";
@@ -819,7 +813,7 @@ class CondUnaryRSY<string mnemonic, bits<16> opcode,
class AsmCondUnaryRSY<string mnemonic, bits<16> opcode,
RegisterOperand cls, bits<5> bytes,
AddressingMode mode = bdaddr20only>
- : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, uimm8zx4:$R3),
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$BD2, imm32zx4:$R3),
mnemonic#"\t$R1, $BD2, $R3", []>,
Requires<[FeatureLoadStoreOnCond]> {
let mayLoad = 1;
@@ -989,6 +983,33 @@ class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
let DisableEncoding = "$R1src";
}
+class BinaryRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2),
+ mnemonic#"\t$R1, $BD2",
+ [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> {
+ let R3 = 0;
+ let Constraints = "$R1 = $R1src";
+ let DisableEncoding = "$R1src";
+}
+
+class BinaryRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>;
+
+multiclass BinaryRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
+ SDPatternOperator operator, RegisterOperand cls> {
+ let NumOpsKey = mnemonic in {
+ let NumOpsValue = "3" in
+ def K : BinaryRSY<mnemonic##"k", opcode2, null_frag, cls>,
+ Requires<[FeatureDistinctOps]>;
+ let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
+ def "" : BinaryRS<mnemonic, opcode1, operator, cls>;
+ }
+}
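The renamed BinaryRSAndK multiclass still pairs a two-address RS form with a three-address RSY "K" form gated on FeatureDistinctOps. A brief sketch of why the distinct-operands form helps; the function and register comments are illustrative assumptions, not taken from this diff:

    unsigned shiftKeepSource(unsigned value, unsigned amount) {
      unsigned shifted = value << amount;  // SLLK can write 'shifted' without clobbering 'value'
      return shifted + value;              // 'value' stays live, so no extra copy is needed
    }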
+
class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
AddressingMode mode = bdxaddr12only>
@@ -1073,33 +1094,6 @@ multiclass BinarySIPair<string mnemonic, bits<8> siOpcode,
}
}
-class ShiftRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
- RegisterOperand cls>
- : InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, shift12only:$BD2),
- mnemonic#"\t$R1, $BD2",
- [(set cls:$R1, (operator cls:$R1src, shift12only:$BD2))]> {
- let R3 = 0;
- let Constraints = "$R1 = $R1src";
- let DisableEncoding = "$R1src";
-}
-
-class ShiftRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
- RegisterOperand cls>
- : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, shift20only:$BD2),
- mnemonic#"\t$R1, $R3, $BD2",
- [(set cls:$R1, (operator cls:$R3, shift20only:$BD2))]>;
-
-multiclass ShiftRSAndK<string mnemonic, bits<8> opcode1, bits<16> opcode2,
- SDPatternOperator operator, RegisterOperand cls> {
- let NumOpsKey = mnemonic in {
- let NumOpsValue = "3" in
- def K : ShiftRSY<mnemonic##"k", opcode2, null_frag, cls>,
- Requires<[FeatureDistinctOps]>;
- let NumOpsValue = "2", isConvertibleToThreeAddress = 1 in
- def "" : ShiftRS<mnemonic, opcode1, operator, cls>;
- }
-}
-
class CompareRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls1, RegisterOperand cls2>
: InstRR<opcode, (outs), (ins cls1:$R1, cls2:$R2),
@@ -1267,6 +1261,15 @@ class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
let AccessBytes = bytes;
}
+class LoadAndOpRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
+ RegisterOperand cls, AddressingMode mode = bdaddr20only>
+ : InstRSY<opcode, (outs cls:$R1), (ins cls:$R3, mode:$BD2),
+ mnemonic#"\t$R1, $R3, $BD2",
+ [(set cls:$R1, (operator mode:$BD2, cls:$R3))]> {
+ let mayLoad = 1;
+ let mayStore = 1;
+}
+
class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
RegisterOperand cls, AddressingMode mode = bdaddr12only>
: InstRS<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, mode:$BD2),
@@ -1302,22 +1305,23 @@ multiclass CmpSwapRSPair<string mnemonic, bits<8> rsOpcode, bits<16> rsyOpcode,
class RotateSelectRIEf<string mnemonic, bits<16> opcode, RegisterOperand cls1,
RegisterOperand cls2>
: InstRIEf<opcode, (outs cls1:$R1),
- (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ (ins cls1:$R1src, cls2:$R2, imm32zx8:$I3, imm32zx8:$I4,
+ imm32zx6:$I5),
mnemonic#"\t$R1, $R2, $I3, $I4, $I5", []> {
let Constraints = "$R1 = $R1src";
let DisableEncoding = "$R1src";
}
class PrefetchRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator>
- : InstRXY<opcode, (outs), (ins uimm8zx4:$R1, bdxaddr20only:$XBD2),
+ : InstRXY<opcode, (outs), (ins imm32zx4:$R1, bdxaddr20only:$XBD2),
mnemonic##"\t$R1, $XBD2",
- [(operator uimm8zx4:$R1, bdxaddr20only:$XBD2)]>;
+ [(operator imm32zx4:$R1, bdxaddr20only:$XBD2)]>;
class PrefetchRILPC<string mnemonic, bits<12> opcode,
SDPatternOperator operator>
- : InstRIL<opcode, (outs), (ins uimm8zx4:$R1, pcrel32:$I2),
+ : InstRIL<opcode, (outs), (ins imm32zx4:$R1, pcrel32:$I2),
mnemonic##"\t$R1, $I2",
- [(operator uimm8zx4:$R1, pcrel32:$I2)]> {
+ [(operator imm32zx4:$R1, pcrel32:$I2)]> {
// We want PC-relative addresses to be tried ahead of BD and BDX addresses.
// However, BDXs have two extra operands and are therefore 6 units more
// complex.
@@ -1437,7 +1441,8 @@ class StoreRXYPseudo<SDPatternOperator operator, RegisterOperand cls,
// of registers.
class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2>
: Pseudo<(outs cls1:$R1),
- (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5),
+ (ins cls1:$R1src, cls2:$R2, imm32zx8:$I3, imm32zx8:$I4,
+ imm32zx6:$I5),
[]> {
let Constraints = "$R1 = $R1src";
let DisableEncoding = "$R1src";
@@ -1447,9 +1452,9 @@ class RotateSelectRIEfPseudo<RegisterOperand cls1, RegisterOperand cls2>
// the value of the PSW's 2-bit condition code field.
class SelectWrapper<RegisterOperand cls>
: Pseudo<(outs cls:$dst),
- (ins cls:$src1, cls:$src2, uimm8zx4:$valid, uimm8zx4:$cc),
+ (ins cls:$src1, cls:$src2, imm32zx4:$valid, imm32zx4:$cc),
[(set cls:$dst, (z_select_ccmask cls:$src1, cls:$src2,
- uimm8zx4:$valid, uimm8zx4:$cc))]> {
+ imm32zx4:$valid, imm32zx4:$cc))]> {
let usesCustomInserter = 1;
// Although the instructions used by these nodes do not in themselves
// change CC, the insertion requires new blocks, and CC cannot be live
@@ -1463,14 +1468,14 @@ multiclass CondStores<RegisterOperand cls, SDPatternOperator store,
SDPatternOperator load, AddressingMode mode> {
let Defs = [CC], Uses = [CC], usesCustomInserter = 1 in {
def "" : Pseudo<(outs),
- (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ (ins cls:$new, mode:$addr, imm32zx4:$valid, imm32zx4:$cc),
[(store (z_select_ccmask cls:$new, (load mode:$addr),
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr)]>;
def Inv : Pseudo<(outs),
- (ins cls:$new, mode:$addr, uimm8zx4:$valid, uimm8zx4:$cc),
+ (ins cls:$new, mode:$addr, imm32zx4:$valid, imm32zx4:$cc),
[(store (z_select_ccmask (load mode:$addr), cls:$new,
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr)]>;
}
}
@@ -1598,6 +1603,7 @@ class CompareAliasRI<SDPatternOperator operator, RegisterOperand cls,
// An alias of a RotateSelectRIEf, but with different register sizes.
class RotateSelectAliasRIEf<RegisterOperand cls1, RegisterOperand cls2>
: Alias<6, (outs cls1:$R1),
- (ins cls1:$R1src, cls2:$R2, uimm8:$I3, uimm8:$I4, uimm8zx6:$I5), []> {
+ (ins cls1:$R1src, cls2:$R2, imm32zx8:$I3, imm32zx8:$I4,
+ imm32zx6:$I5), []> {
let Constraints = "$R1 = $R1src";
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index acfeed80b54a..f58ab474fbbc 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -12,17 +12,17 @@
//===----------------------------------------------------------------------===//
#include "SystemZInstrInfo.h"
-#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
+#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"
-using namespace llvm;
-
// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
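The split shift avoids undefined behaviour when Count is 64: a single uint64_t(1) << 64 would be out of range, while two shifts whose amounts never exceed 63 stay well defined. A quick illustrative check, assuming the helper above is in scope:

    #include <cassert>
    static void checkAllOnes() {
      assert(allOnes(0) == 0);
      assert(allOnes(3) == 7);
      assert(allOnes(64) == ~0ULL);
    }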
@@ -40,9 +40,9 @@ static bool isHighReg(unsigned int Reg) {
// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}
-SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
+SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
: SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
- RI(tm), TM(tm) {
+ RI(), STI(sti) {
}
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
@@ -53,7 +53,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
MachineFunction &MF = *MBB->getParent();
// Get two load or store instructions. Use the original instruction for one
- // of them (arbitarily the second here) and create a clone for the other.
+ // of them (arbitrarily the second here) and create a clone for the other.
MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
MBB->insert(MI, EarlierMI);
@@ -280,15 +280,15 @@ bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
}
// If the block has any instructions after a JMP, delete them.
- while (llvm::next(I) != MBB.end())
- llvm::next(I)->eraseFromParent();
+ while (std::next(I) != MBB.end())
+ std::next(I)->eraseFromParent();
Cond.clear();
- FBB = 0;
+ FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
- TBB = 0;
+ TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
continue;
@@ -418,7 +418,7 @@ bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
static MachineInstr *getDef(unsigned Reg,
const MachineRegisterInfo *MRI) {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
- return 0;
+ return nullptr;
return MRI->getUniqueVRegDef(Reg);
}
@@ -442,7 +442,7 @@ static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
static bool removeIPMBasedCompare(MachineInstr *Compare, unsigned SrcReg,
const MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI) {
- MachineInstr *LGFR = 0;
+ MachineInstr *LGFR = nullptr;
MachineInstr *RLL = getDef(SrcReg, MRI);
if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
LGFR = RLL;
@@ -488,7 +488,7 @@ SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
bool IsLogical = (Compare->getDesc().TSFlags & SystemZII::IsLogical) != 0;
if (Value == 0 &&
!IsLogical &&
- removeIPMBasedCompare(Compare, SrcReg, MRI, TM.getRegisterInfo()))
+ removeIPMBasedCompare(Compare, SrcReg, MRI, &RI))
return true;
return false;
}
@@ -505,7 +505,7 @@ static unsigned getConditionalMove(unsigned Opcode) {
bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
unsigned Opcode = MI->getOpcode();
- if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
+ if (STI.hasLoadStoreOnCond() &&
getConditionalMove(Opcode))
return true;
return false;
@@ -537,12 +537,12 @@ PredicateInstruction(MachineInstr *MI,
unsigned CCMask = Pred[1].getImm();
assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
unsigned Opcode = MI->getOpcode();
- if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
+ if (STI.hasLoadStoreOnCond()) {
if (unsigned CondOpcode = getConditionalMove(Opcode)) {
MI->setDesc(get(CondOpcode));
MachineInstrBuilder(*MI->getParent()->getParent(), MI)
.addImm(CCValid).addImm(CCMask)
- .addReg(SystemZ::CC, RegState::Implicit);;
+ .addReg(SystemZ::CC, RegState::Implicit);
return true;
}
}
@@ -628,16 +628,16 @@ static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
}
namespace {
- struct LogicOp {
- LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
- LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
- : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
+struct LogicOp {
+ LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
+ LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
+ : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
- operator bool() const { return RegSize; }
+ operator bool() const { return RegSize; }
- unsigned RegSize, ImmLSB, ImmSize;
- };
-}
+ unsigned RegSize, ImmLSB, ImmSize;
+};
+} // end anonymous namespace
static LogicOp interpretAndImmediate(unsigned Opcode) {
switch (Opcode) {
@@ -685,7 +685,7 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// We prefer to keep the two-operand form where possible both
// because it tends to be shorter and because some instructions
// have memory forms that can be used during spilling.
- if (TM.getSubtargetImpl()->hasDistinctOps()) {
+ if (STI.hasDistinctOps()) {
MachineOperand &Dest = MI->getOperand(0);
MachineOperand &Src = MI->getOperand(1);
unsigned DestReg = Dest.getReg();
@@ -740,7 +740,7 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return finishConvertToThreeAddress(MI, MIB, LV);
}
}
- return 0;
+ return nullptr;
}
MachineInstr *
@@ -761,12 +761,12 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
.addFrameIndex(FrameIndex).addImm(0)
.addImm(MI->getOperand(2).getImm());
}
- return 0;
+ return nullptr;
}
// All other cases require a single operand.
if (Ops.size() != 1)
- return 0;
+ return nullptr;
unsigned OpNum = Ops[0];
assert(Size == MF.getRegInfo()
@@ -858,14 +858,14 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
}
}
- return 0;
+ return nullptr;
}
MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
- return 0;
+ return nullptr;
}
bool
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
index be4c8fe2add2..83009cb8d426 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -26,93 +26,94 @@ namespace llvm {
class SystemZTargetMachine;
namespace SystemZII {
- enum {
- // See comments in SystemZInstrFormats.td.
- SimpleBDXLoad = (1 << 0),
- SimpleBDXStore = (1 << 1),
- Has20BitOffset = (1 << 2),
- HasIndex = (1 << 3),
- Is128Bit = (1 << 4),
- AccessSizeMask = (31 << 5),
- AccessSizeShift = 5,
- CCValuesMask = (15 << 10),
- CCValuesShift = 10,
- CompareZeroCCMaskMask = (15 << 14),
- CompareZeroCCMaskShift = 14,
- CCMaskFirst = (1 << 18),
- CCMaskLast = (1 << 19),
- IsLogical = (1 << 20)
- };
- static inline unsigned getAccessSize(unsigned int Flags) {
- return (Flags & AccessSizeMask) >> AccessSizeShift;
- }
- static inline unsigned getCCValues(unsigned int Flags) {
- return (Flags & CCValuesMask) >> CCValuesShift;
- }
- static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
- return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
- }
-
- // SystemZ MachineOperand target flags.
- enum {
- // Masks out the bits for the access model.
- MO_SYMBOL_MODIFIER = (1 << 0),
-
- // @GOT (aka @GOTENT)
- MO_GOT = (1 << 0)
- };
- // Classifies a branch.
- enum BranchType {
- // An instruction that branches on the current value of CC.
- BranchNormal,
-
- // An instruction that peforms a 32-bit signed comparison and branches
- // on the result.
- BranchC,
-
- // An instruction that peforms a 32-bit unsigned comparison and branches
- // on the result.
- BranchCL,
-
- // An instruction that peforms a 64-bit signed comparison and branches
- // on the result.
- BranchCG,
-
- // An instruction that peforms a 64-bit unsigned comparison and branches
- // on the result.
- BranchCLG,
+enum {
+ // See comments in SystemZInstrFormats.td.
+ SimpleBDXLoad = (1 << 0),
+ SimpleBDXStore = (1 << 1),
+ Has20BitOffset = (1 << 2),
+ HasIndex = (1 << 3),
+ Is128Bit = (1 << 4),
+ AccessSizeMask = (31 << 5),
+ AccessSizeShift = 5,
+ CCValuesMask = (15 << 10),
+ CCValuesShift = 10,
+ CompareZeroCCMaskMask = (15 << 14),
+ CompareZeroCCMaskShift = 14,
+ CCMaskFirst = (1 << 18),
+ CCMaskLast = (1 << 19),
+ IsLogical = (1 << 20)
+};
+static inline unsigned getAccessSize(unsigned int Flags) {
+ return (Flags & AccessSizeMask) >> AccessSizeShift;
+}
+static inline unsigned getCCValues(unsigned int Flags) {
+ return (Flags & CCValuesMask) >> CCValuesShift;
+}
+static inline unsigned getCompareZeroCCMask(unsigned int Flags) {
+ return (Flags & CompareZeroCCMaskMask) >> CompareZeroCCMaskShift;
+}
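These helpers unpack fields that TableGen packs into each instruction's TSFlags. A hypothetical usage sketch, not present in this diff; MI is assumed to be a MachineInstr pointer:

    uint64_t Flags = MI->getDesc().TSFlags;
    if (Flags & SystemZII::SimpleBDXLoad) {
      unsigned Bytes = SystemZII::getAccessSize(Flags);  // access width in bytes
      (void)Bytes;
    }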
- // An instruction that decrements a 32-bit register and branches if
- // the result is nonzero.
- BranchCT,
+// SystemZ MachineOperand target flags.
+enum {
+ // Masks out the bits for the access model.
+ MO_SYMBOL_MODIFIER = (1 << 0),
- // An instruction that decrements a 64-bit register and branches if
- // the result is nonzero.
- BranchCTG
- };
- // Information about a branch instruction.
- struct Branch {
- // The type of the branch.
- BranchType Type;
+ // @GOT (aka @GOTENT)
+ MO_GOT = (1 << 0)
+};
+// Classifies a branch.
+enum BranchType {
+ // An instruction that branches on the current value of CC.
+ BranchNormal,
+
+  // An instruction that performs a 32-bit signed comparison and branches
+ // on the result.
+ BranchC,
+
+  // An instruction that performs a 32-bit unsigned comparison and branches
+ // on the result.
+ BranchCL,
+
+  // An instruction that performs a 64-bit signed comparison and branches
+ // on the result.
+ BranchCG,
+
+  // An instruction that performs a 64-bit unsigned comparison and branches
+ // on the result.
+ BranchCLG,
+
+ // An instruction that decrements a 32-bit register and branches if
+ // the result is nonzero.
+ BranchCT,
+
+ // An instruction that decrements a 64-bit register and branches if
+ // the result is nonzero.
+ BranchCTG
+};
+// Information about a branch instruction.
+struct Branch {
+ // The type of the branch.
+ BranchType Type;
- // CCMASK_<N> is set if CC might be equal to N.
- unsigned CCValid;
+ // CCMASK_<N> is set if CC might be equal to N.
+ unsigned CCValid;
- // CCMASK_<N> is set if the branch should be taken when CC == N.
- unsigned CCMask;
+ // CCMASK_<N> is set if the branch should be taken when CC == N.
+ unsigned CCMask;
- // The target of the branch.
- const MachineOperand *Target;
+ // The target of the branch.
+ const MachineOperand *Target;
- Branch(BranchType type, unsigned ccValid, unsigned ccMask,
- const MachineOperand *target)
- : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
- };
-}
+ Branch(BranchType type, unsigned ccValid, unsigned ccMask,
+ const MachineOperand *target)
+ : Type(type), CCValid(ccValid), CCMask(ccMask), Target(target) {}
+};
+} // end namespace SystemZII
+class SystemZSubtarget;
class SystemZInstrInfo : public SystemZGenInstrInfo {
const SystemZRegisterInfo RI;
- SystemZTargetMachine &TM;
+ SystemZSubtarget &STI;
void splitMove(MachineBasicBlock::iterator MI, unsigned NewOpcode) const;
void splitAdjDynAlloc(MachineBasicBlock::iterator MI) const;
@@ -130,81 +131,66 @@ class SystemZInstrInfo : public SystemZGenInstrInfo {
virtual void anchor();
public:
- explicit SystemZInstrInfo(SystemZTargetMachine &TM);
+ explicit SystemZInstrInfo(SystemZSubtarget &STI);
// Override TargetInstrInfo.
- virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const LLVM_OVERRIDE;
- virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const LLVM_OVERRIDE;
- virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
- int &SrcFrameIndex) const LLVM_OVERRIDE;
- virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
- MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const LLVM_OVERRIDE;
- virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const LLVM_OVERRIDE;
- virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
- MachineBasicBlock *FBB,
- const SmallVectorImpl<MachineOperand> &Cond,
- DebugLoc DL) const LLVM_OVERRIDE;
+ unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const override;
+ bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ int &SrcFrameIndex) const override;
+ bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const override;
+ unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+ unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ const SmallVectorImpl<MachineOperand> &Cond,
+ DebugLoc DL) const override;
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
- unsigned &SrcReg2, int &Mask, int &Value) const
- LLVM_OVERRIDE;
+ unsigned &SrcReg2, int &Mask, int &Value) const override;
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int Mask, int Value,
- const MachineRegisterInfo *MRI) const LLVM_OVERRIDE;
- virtual bool isPredicable(MachineInstr *MI) const LLVM_OVERRIDE;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
- unsigned ExtraPredCycles,
- const BranchProbability &Probability) const
- LLVM_OVERRIDE;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
- unsigned NumCyclesT,
- unsigned ExtraPredCyclesT,
- MachineBasicBlock &FMBB,
- unsigned NumCyclesF,
- unsigned ExtraPredCyclesF,
- const BranchProbability &Probability) const
- LLVM_OVERRIDE;
- virtual bool
- PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const
- LLVM_OVERRIDE;
- virtual void copyPhysReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI, DebugLoc DL,
- unsigned DestReg, unsigned SrcReg,
- bool KillSrc) const LLVM_OVERRIDE;
- virtual void
- storeRegToStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned SrcReg, bool isKill, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
- virtual void
- loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MBBI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI) const LLVM_OVERRIDE;
- virtual MachineInstr *
- convertToThreeAddress(MachineFunction::iterator &MFI,
- MachineBasicBlock::iterator &MBBI,
- LiveVariables *LV) const;
- virtual MachineInstr *
- foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const;
- virtual MachineInstr *
- foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
- const SmallVectorImpl<unsigned> &Ops,
- MachineInstr* LoadMI) const;
- virtual bool
- expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const LLVM_OVERRIDE;
- virtual bool
- ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
- LLVM_OVERRIDE;
+ const MachineRegisterInfo *MRI) const override;
+ bool isPredicable(MachineInstr *MI) const override;
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ const BranchProbability &Probability) const override;
+ bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumCyclesT, unsigned ExtraPredCyclesT,
+ MachineBasicBlock &FMBB,
+ unsigned NumCyclesF, unsigned ExtraPredCyclesF,
+ const BranchProbability &Probability) const override;
+ bool PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const
+ override;
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ DebugLoc DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+ MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI,
+ LiveVariables *LV) const override;
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const override;
+ MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const override;
+ bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
+ override;
// Return the SystemZRegisterInfo, which this class owns.
const SystemZRegisterInfo &getRegisterInfo() const { return RI; }
@@ -244,7 +230,7 @@ public:
// BRANCH exists, return the opcode for the latter, otherwise return 0.
// MI, if nonnull, is the compare instruction.
unsigned getCompareAndBranch(unsigned Opcode,
- const MachineInstr *MI = 0) const;
+ const MachineInstr *MI = nullptr) const;
// Emit code before MBBI in MI to move immediate value Value into
// physical register Reg.
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 6524e442b63d..f4951ad8e0ac 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -63,11 +63,11 @@ let isBranch = 1, isTerminator = 1, Uses = [CC] in {
def BRCL : InstRIL<0xC04, (outs), (ins cond4:$valid, cond4:$R1,
brtarget32:$I2), "jg$R1\t$I2", []>;
}
- def AsmBRC : InstRI<0xA74, (outs), (ins uimm8zx4:$R1, brtarget16:$I2),
+ def AsmBRC : InstRI<0xA74, (outs), (ins imm32zx4:$R1, brtarget16:$I2),
"brc\t$R1, $I2", []>;
- def AsmBRCL : InstRIL<0xC04, (outs), (ins uimm8zx4:$R1, brtarget32:$I2),
+ def AsmBRCL : InstRIL<0xC04, (outs), (ins imm32zx4:$R1, brtarget32:$I2),
"brcl\t$R1, $I2", []>;
- def AsmBCR : InstRR<0x07, (outs), (ins uimm8zx4:$R1, GR64:$R2),
+ def AsmBCR : InstRR<0x07, (outs), (ins imm32zx4:$R1, GR64:$R2),
"bcr\t$R1, $R2", []>;
}
@@ -109,7 +109,7 @@ multiclass CompareBranches<Operand ccmask, string pos1, string pos2> {
}
let isCodeGenOnly = 1 in
defm C : CompareBranches<cond4, "$M3", "">;
-defm AsmC : CompareBranches<uimm8zx4, "", "$M3, ">;
+defm AsmC : CompareBranches<imm32zx4, "", "$M3, ">;
// Define AsmParser mnemonics for each general condition-code mask
// (integer or floating-point)
@@ -233,9 +233,7 @@ defm CondStore64 : CondStores<GR64, nonvolatile_store,
// Call instructions
//===----------------------------------------------------------------------===//
-// The definitions here are for the call-clobbered registers.
-let isCall = 1, Defs = [R0D, R1D, R2D, R3D, R4D, R5D, R14D,
- F0D, F1D, F2D, F3D, F4D, F5D, F6D, F7D, CC] in {
+let isCall = 1, Defs = [R14D, CC] in {
def CallBRASL : Alias<6, (outs), (ins pcrel32:$I2, variable_ops),
[(z_call pcrel32:$I2)]>;
def CallBASR : Alias<2, (outs), (ins ADDR64:$R2, variable_ops),
@@ -595,22 +593,28 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
let Defs = [CC] in {
let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
- def LPR : UnaryRR <"lp", 0x10, z_iabs32, GR32, GR32>;
- def LPGR : UnaryRRE<"lpg", 0xB900, z_iabs64, GR64, GR64>;
+ def LPR : UnaryRR <"lp", 0x10, z_iabs, GR32, GR32>;
+ def LPGR : UnaryRRE<"lpg", 0xB900, z_iabs, GR64, GR64>;
}
let CCValues = 0xE, CompareZeroCCMask = 0xE in
def LPGFR : UnaryRRE<"lpgf", 0xB910, null_frag, GR64, GR32>;
}
+def : Pat<(z_iabs32 GR32:$src), (LPR GR32:$src)>;
+def : Pat<(z_iabs64 GR64:$src), (LPGR GR64:$src)>;
+defm : SXU<z_iabs, LPGFR>;
defm : SXU<z_iabs64, LPGFR>;
let Defs = [CC] in {
let CCValues = 0xF, CompareZeroCCMask = 0x8 in {
- def LNR : UnaryRR <"ln", 0x11, z_inegabs32, GR32, GR32>;
- def LNGR : UnaryRRE<"lng", 0xB901, z_inegabs64, GR64, GR64>;
+ def LNR : UnaryRR <"ln", 0x11, z_inegabs, GR32, GR32>;
+ def LNGR : UnaryRRE<"lng", 0xB901, z_inegabs, GR64, GR64>;
}
let CCValues = 0xE, CompareZeroCCMask = 0xE in
def LNGFR : UnaryRRE<"lngf", 0xB911, null_frag, GR64, GR32>;
}
+def : Pat<(z_inegabs32 GR32:$src), (LNR GR32:$src)>;
+def : Pat<(z_inegabs64 GR64:$src), (LNGR GR64:$src)>;
+defm : SXU<z_inegabs, LNGFR>;
defm : SXU<z_inegabs64, LNGFR>;
let Defs = [CC] in {
@@ -753,7 +757,7 @@ let Defs = [CC], Uses = [CC] in {
// Subtraction
//===----------------------------------------------------------------------===//
-// Plain substraction. Although immediate forms exist, we use the
+// Plain subtraction. Although immediate forms exist, we use the
// add-immediate instruction instead.
let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
// Subtraction of a register.
@@ -849,7 +853,7 @@ let Defs = [CC] in {
}
// AND to memory
- defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, uimm8>;
+ defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, imm32zx8>;
// Block AND.
let mayLoad = 1, mayStore = 1 in
@@ -906,7 +910,7 @@ let Defs = [CC] in {
}
// OR to memory
- defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, uimm8>;
+ defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, imm32zx8>;
// Block OR.
let mayLoad = 1, mayStore = 1 in
@@ -946,7 +950,7 @@ let Defs = [CC] in {
}
// XOR to memory
- defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, uimm8>;
+ defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, imm32zx8>;
// Block XOR.
let mayLoad = 1, mayStore = 1 in
@@ -1009,26 +1013,26 @@ def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
// Shift left.
let neverHasSideEffects = 1 in {
- defm SLL : ShiftRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
- def SLLG : ShiftRSY<"sllg", 0xEB0D, shl, GR64>;
+ defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+ def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>;
}
// Logical shift right.
let neverHasSideEffects = 1 in {
- defm SRL : ShiftRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
- def SRLG : ShiftRSY<"srlg", 0xEB0C, srl, GR64>;
+ defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
+ def SRLG : BinaryRSY<"srlg", 0xEB0C, srl, GR64>;
}
// Arithmetic shift right.
let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
- defm SRA : ShiftRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
- def SRAG : ShiftRSY<"srag", 0xEB0A, sra, GR64>;
+ defm SRA : BinaryRSAndK<"sra", 0x8A, 0xEBDC, sra, GR32>;
+ def SRAG : BinaryRSY<"srag", 0xEB0A, sra, GR64>;
}
// Rotate left.
let neverHasSideEffects = 1 in {
- def RLL : ShiftRSY<"rll", 0xEB1D, rotl, GR32>;
- def RLLG : ShiftRSY<"rllg", 0xEB1C, rotl, GR64>;
+ def RLL : BinaryRSY<"rll", 0xEB1D, rotl, GR32>;
+ def RLLG : BinaryRSY<"rllg", 0xEB1C, rotl, GR64>;
}
// Rotate second operand left and insert selected bits into first operand.
@@ -1043,15 +1047,15 @@ let Defs = [CC] in {
// Forms of RISBG that only affect one word of the destination register.
// They do not set CC.
-def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>, Requires<[FeatureHighWord]>;
-def RISBLL : RotateSelectAliasRIEf<GR32, GR32>, Requires<[FeatureHighWord]>;
-def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>, Requires<[FeatureHighWord]>;
-def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>, Requires<[FeatureHighWord]>;
-def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>, Requires<[FeatureHighWord]>;
-def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>,
- Requires<[FeatureHighWord]>;
-def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>,
- Requires<[FeatureHighWord]>;
+let Predicates = [FeatureHighWord] in {
+ def RISBMux : RotateSelectRIEfPseudo<GRX32, GRX32>;
+ def RISBLL : RotateSelectAliasRIEf<GR32, GR32>;
+ def RISBLH : RotateSelectAliasRIEf<GR32, GRH32>;
+ def RISBHL : RotateSelectAliasRIEf<GRH32, GR32>;
+ def RISBHH : RotateSelectAliasRIEf<GRH32, GRH32>;
+ def RISBLG : RotateSelectRIEf<"risblg", 0xEC51, GR32, GR64>;
+ def RISBHG : RotateSelectRIEf<"risbhg", 0xEC5D, GRH32, GR64>;
+}
// Rotate second operand left and perform a logical operation with selected
// bits of the first operand. The CC result only describes the selected bits,
@@ -1195,58 +1199,89 @@ def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
// Atomic operations
//===----------------------------------------------------------------------===//
-def ATOMIC_SWAPW : AtomicLoadWBinaryReg<z_atomic_swapw>;
-def ATOMIC_SWAP_32 : AtomicLoadBinaryReg32<atomic_swap_32>;
-def ATOMIC_SWAP_64 : AtomicLoadBinaryReg64<atomic_swap_64>;
-
-def ATOMIC_LOADW_AR : AtomicLoadWBinaryReg<z_atomic_loadw_add>;
-def ATOMIC_LOADW_AFI : AtomicLoadWBinaryImm<z_atomic_loadw_add, simm32>;
-def ATOMIC_LOAD_AR : AtomicLoadBinaryReg32<atomic_load_add_32>;
-def ATOMIC_LOAD_AHI : AtomicLoadBinaryImm32<atomic_load_add_32, imm32sx16>;
-def ATOMIC_LOAD_AFI : AtomicLoadBinaryImm32<atomic_load_add_32, simm32>;
-def ATOMIC_LOAD_AGR : AtomicLoadBinaryReg64<atomic_load_add_64>;
-def ATOMIC_LOAD_AGHI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx16>;
-def ATOMIC_LOAD_AGFI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx32>;
-
-def ATOMIC_LOADW_SR : AtomicLoadWBinaryReg<z_atomic_loadw_sub>;
-def ATOMIC_LOAD_SR : AtomicLoadBinaryReg32<atomic_load_sub_32>;
-def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
-
-def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
-def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
-def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
-def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
-def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
-def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
-def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
-def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
-def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
-def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
-def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
-def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
-def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
+def Serialize : Alias<2, (outs), (ins), [(z_serialize)]>;
+
+let Predicates = [FeatureInterlockedAccess1], Defs = [CC] in {
+ def LAA : LoadAndOpRSY<"laa", 0xEBF8, atomic_load_add_32, GR32>;
+ def LAAG : LoadAndOpRSY<"laag", 0xEBE8, atomic_load_add_64, GR64>;
+ def LAAL : LoadAndOpRSY<"laal", 0xEBFA, null_frag, GR32>;
+ def LAALG : LoadAndOpRSY<"laalg", 0xEBEA, null_frag, GR64>;
+ def LAN : LoadAndOpRSY<"lan", 0xEBF4, atomic_load_and_32, GR32>;
+ def LANG : LoadAndOpRSY<"lang", 0xEBE4, atomic_load_and_64, GR64>;
+ def LAO : LoadAndOpRSY<"lao", 0xEBF6, atomic_load_or_32, GR32>;
+ def LAOG : LoadAndOpRSY<"laog", 0xEBE6, atomic_load_or_64, GR64>;
+ def LAX : LoadAndOpRSY<"lax", 0xEBF7, atomic_load_xor_32, GR32>;
+ def LAXG : LoadAndOpRSY<"laxg", 0xEBE7, atomic_load_xor_64, GR64>;
+}
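With interlocked-access facility 1 (z196), the LAA family performs the read-modify-write in a single instruction and returns the old value in R1, so simple atomic fetch-and-op sequences no longer need a compare-and-swap loop. Illustrative C++ only; the assembly comment is a schematic assumption:

    #include <atomic>

    int fetchAdd(std::atomic<int> &v, int d) {
      return v.fetch_add(d);   // roughly: laa %r2, %r3, 0(%r4)
    }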
+
+def ATOMIC_SWAPW : AtomicLoadWBinaryReg<z_atomic_swapw>;
+def ATOMIC_SWAP_32 : AtomicLoadBinaryReg32<atomic_swap_32>;
+def ATOMIC_SWAP_64 : AtomicLoadBinaryReg64<atomic_swap_64>;
+
+def ATOMIC_LOADW_AR : AtomicLoadWBinaryReg<z_atomic_loadw_add>;
+def ATOMIC_LOADW_AFI : AtomicLoadWBinaryImm<z_atomic_loadw_add, simm32>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_AR : AtomicLoadBinaryReg32<atomic_load_add_32>;
+ def ATOMIC_LOAD_AHI : AtomicLoadBinaryImm32<atomic_load_add_32, imm32sx16>;
+ def ATOMIC_LOAD_AFI : AtomicLoadBinaryImm32<atomic_load_add_32, simm32>;
+ def ATOMIC_LOAD_AGR : AtomicLoadBinaryReg64<atomic_load_add_64>;
+ def ATOMIC_LOAD_AGHI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx16>;
+ def ATOMIC_LOAD_AGFI : AtomicLoadBinaryImm64<atomic_load_add_64, imm64sx32>;
+}
+
+def ATOMIC_LOADW_SR : AtomicLoadWBinaryReg<z_atomic_loadw_sub>;
+def ATOMIC_LOAD_SR : AtomicLoadBinaryReg32<atomic_load_sub_32>;
+def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
+
+def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
+def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
+ def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32,
+ imm32ll16c>;
+ def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32,
+ imm32lh16c>;
+ def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
+ def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
+ def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64ll16c>;
+ def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64lh16c>;
+ def ATOMIC_LOAD_NIHL64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64hl16c>;
+ def ATOMIC_LOAD_NIHH64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64hh16c>;
+ def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64lf32c>;
+ def ATOMIC_LOAD_NIHF64 : AtomicLoadBinaryImm64<atomic_load_and_64,
+ imm64hf32c>;
+}
def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
-def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
-def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
-def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
-def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
-def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
-def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
-def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
-def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
-def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
-def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
-def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
+ def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
+ def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
+ def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
+ def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
+ def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
+ def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
+ def ATOMIC_LOAD_OIHL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
+ def ATOMIC_LOAD_OIHH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
+ def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
+ def ATOMIC_LOAD_OIHF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
+}
def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
-def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
-def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
-def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
-def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
-def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
+let Predicates = [FeatureNoInterlockedAccess1] in {
+ def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
+ def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
+ def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
+ def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
+ def ATOMIC_LOAD_XIHF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
+}
def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand,
@@ -1366,15 +1401,15 @@ def : Pat<(sub GR64:$src1, (azextloadi32 bdxaddr20only:$addr)),
// Optimize sign-extended 1/0 selects to -1/0 selects. This is important
// for vector legalization.
-def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid, uimm8zx4:$cc)),
+def : Pat<(sra (shl (i32 (z_select_ccmask 1, 0, imm32zx4:$valid, imm32zx4:$cc)),
(i32 31)),
(i32 31)),
- (Select32 (LHI -1), (LHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
-def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, uimm8zx4:$valid,
- uimm8zx4:$cc)))),
+ (Select32 (LHI -1), (LHI 0), imm32zx4:$valid, imm32zx4:$cc)>;
+def : Pat<(sra (shl (i64 (anyext (i32 (z_select_ccmask 1, 0, imm32zx4:$valid,
+ imm32zx4:$cc)))),
(i32 63)),
(i32 63)),
- (Select64 (LGHI -1), (LGHI 0), uimm8zx4:$valid, uimm8zx4:$cc)>;
+ (Select64 (LGHI -1), (LGHI 0), imm32zx4:$valid, imm32zx4:$cc)>;
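These peepholes recognise a 1/0 condition that is immediately sign-extended to a full-width mask and select -1/0 directly instead. An illustrative C++ shape of the computation being matched; the variable names are assumptions:

    int32_t mask = (a < b) ? -1 : 0;   // handled via the Select32/Select64 pseudos above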
// Peepholes for turning scalar operations into block operations.
defm : BlockLoadStore<anyextloadi8, i32, MVCSequence, NCSequence, OCSequence,
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
index ba027d460440..808133432486 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -53,8 +53,6 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-long-branch"
-
#include "SystemZTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -68,102 +66,105 @@
using namespace llvm;
+#define DEBUG_TYPE "systemz-long-branch"
+
STATISTIC(LongBranches, "Number of long branches.");
namespace {
- // Represents positional information about a basic block.
- struct MBBInfo {
- // The address that we currently assume the block has.
- uint64_t Address;
-
- // The size of the block in bytes, excluding terminators.
- // This value never changes.
- uint64_t Size;
-
- // The minimum alignment of the block, as a log2 value.
- // This value never changes.
- unsigned Alignment;
-
- // The number of terminators in this block. This value never changes.
- unsigned NumTerminators;
-
- MBBInfo()
- : Address(0), Size(0), Alignment(0), NumTerminators(0) {}
- };
-
- // Represents the state of a block terminator.
- struct TerminatorInfo {
- // If this terminator is a relaxable branch, this points to the branch
- // instruction, otherwise it is null.
- MachineInstr *Branch;
-
- // The address that we currently assume the terminator has.
- uint64_t Address;
-
- // The current size of the terminator in bytes.
- uint64_t Size;
-
- // If Branch is nonnull, this is the number of the target block,
- // otherwise it is unused.
- unsigned TargetBlock;
-
- // If Branch is nonnull, this is the length of the longest relaxed form,
- // otherwise it is zero.
- unsigned ExtraRelaxSize;
-
- TerminatorInfo() : Branch(0), Size(0), TargetBlock(0), ExtraRelaxSize(0) {}
- };
-
- // Used to keep track of the current position while iterating over the blocks.
- struct BlockPosition {
- // The address that we assume this position has.
- uint64_t Address;
-
- // The number of low bits in Address that are known to be the same
- // as the runtime address.
- unsigned KnownBits;
-
- BlockPosition(unsigned InitialAlignment)
- : Address(0), KnownBits(InitialAlignment) {}
- };
-
- class SystemZLongBranch : public MachineFunctionPass {
- public:
- static char ID;
- SystemZLongBranch(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(0) {}
-
- virtual const char *getPassName() const {
- return "SystemZ Long Branch";
- }
+// Represents positional information about a basic block.
+struct MBBInfo {
+ // The address that we currently assume the block has.
+ uint64_t Address;
+
+ // The size of the block in bytes, excluding terminators.
+ // This value never changes.
+ uint64_t Size;
+
+ // The minimum alignment of the block, as a log2 value.
+ // This value never changes.
+ unsigned Alignment;
+
+ // The number of terminators in this block. This value never changes.
+ unsigned NumTerminators;
+
+ MBBInfo()
+ : Address(0), Size(0), Alignment(0), NumTerminators(0) {}
+};
+
+// Represents the state of a block terminator.
+struct TerminatorInfo {
+ // If this terminator is a relaxable branch, this points to the branch
+ // instruction, otherwise it is null.
+ MachineInstr *Branch;
+
+ // The address that we currently assume the terminator has.
+ uint64_t Address;
+
+ // The current size of the terminator in bytes.
+ uint64_t Size;
+
+ // If Branch is nonnull, this is the number of the target block,
+ // otherwise it is unused.
+ unsigned TargetBlock;
+
+ // If Branch is nonnull, this is the length of the longest relaxed form,
+ // otherwise it is zero.
+ unsigned ExtraRelaxSize;
+
+ TerminatorInfo() : Branch(nullptr), Size(0), TargetBlock(0),
+ ExtraRelaxSize(0) {}
+};
+
+// Used to keep track of the current position while iterating over the blocks.
+struct BlockPosition {
+ // The address that we assume this position has.
+ uint64_t Address;
+
+ // The number of low bits in Address that are known to be the same
+ // as the runtime address.
+ unsigned KnownBits;
+
+ BlockPosition(unsigned InitialAlignment)
+ : Address(0), KnownBits(InitialAlignment) {}
+};
+
+class SystemZLongBranch : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZLongBranch(const SystemZTargetMachine &tm)
+ : MachineFunctionPass(ID), TII(nullptr) {}
+
+ const char *getPassName() const override {
+ return "SystemZ Long Branch";
+ }
- bool runOnMachineFunction(MachineFunction &F);
-
- private:
- void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
- void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
- bool AssumeRelaxed);
- TerminatorInfo describeTerminator(MachineInstr *MI);
- uint64_t initMBBInfo();
- bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
- bool mustRelaxABranch();
- void setWorstCaseAddresses();
- void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode);
- void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode);
- void relaxBranch(TerminatorInfo &Terminator);
- void relaxBranches();
-
- const SystemZInstrInfo *TII;
- MachineFunction *MF;
- SmallVector<MBBInfo, 16> MBBs;
- SmallVector<TerminatorInfo, 16> Terminators;
- };
-
- char SystemZLongBranch::ID = 0;
-
- const uint64_t MaxBackwardRange = 0x10000;
- const uint64_t MaxForwardRange = 0xfffe;
-} // end of anonymous namespace
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+private:
+ void skipNonTerminators(BlockPosition &Position, MBBInfo &Block);
+ void skipTerminator(BlockPosition &Position, TerminatorInfo &Terminator,
+ bool AssumeRelaxed);
+ TerminatorInfo describeTerminator(MachineInstr *MI);
+ uint64_t initMBBInfo();
+ bool mustRelaxBranch(const TerminatorInfo &Terminator, uint64_t Address);
+ bool mustRelaxABranch();
+ void setWorstCaseAddresses();
+ void splitBranchOnCount(MachineInstr *MI, unsigned AddOpcode);
+ void splitCompareBranch(MachineInstr *MI, unsigned CompareOpcode);
+ void relaxBranch(TerminatorInfo &Terminator);
+ void relaxBranches();
+
+ const SystemZInstrInfo *TII;
+ MachineFunction *MF;
+ SmallVector<MBBInfo, 16> MBBs;
+ SmallVector<TerminatorInfo, 16> Terminators;
+};
+
+char SystemZLongBranch::ID = 0;
+
+const uint64_t MaxBackwardRange = 0x10000;
+const uint64_t MaxForwardRange = 0xfffe;
+} // end anonymous namespace
FunctionPass *llvm::createSystemZLongBranchPass(SystemZTargetMachine &TM) {
return new SystemZLongBranch(TM);
@@ -321,9 +322,8 @@ bool SystemZLongBranch::mustRelaxBranch(const TerminatorInfo &Terminator,
// Return true if, under current assumptions, any terminator needs
// to be relaxed.
bool SystemZLongBranch::mustRelaxABranch() {
- for (SmallVectorImpl<TerminatorInfo>::iterator TI = Terminators.begin(),
- TE = Terminators.end(); TI != TE; ++TI)
- if (mustRelaxBranch(*TI, TI->Address))
+ for (auto &Terminator : Terminators)
+ if (mustRelaxBranch(Terminator, Terminator.Address))
return true;
return false;
}
@@ -333,10 +333,9 @@ bool SystemZLongBranch::mustRelaxABranch() {
void SystemZLongBranch::setWorstCaseAddresses() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
BlockPosition Position(MF->getAlignment());
- for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
- BI != BE; ++BI) {
- skipNonTerminators(Position, *BI);
- for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ for (auto &Block : MBBs) {
+ skipNonTerminators(Position, Block);
+ for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
skipTerminator(Position, *TI, true);
++TI;
}
@@ -426,7 +425,7 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
Terminator.Size += Terminator.ExtraRelaxSize;
Terminator.ExtraRelaxSize = 0;
- Terminator.Branch = 0;
+ Terminator.Branch = nullptr;
++LongBranches;
}
@@ -435,10 +434,9 @@ void SystemZLongBranch::relaxBranch(TerminatorInfo &Terminator) {
void SystemZLongBranch::relaxBranches() {
SmallVector<TerminatorInfo, 16>::iterator TI = Terminators.begin();
BlockPosition Position(MF->getAlignment());
- for (SmallVectorImpl<MBBInfo>::iterator BI = MBBs.begin(), BE = MBBs.end();
- BI != BE; ++BI) {
- skipNonTerminators(Position, *BI);
- for (unsigned BTI = 0, BTE = BI->NumTerminators; BTI != BTE; ++BTI) {
+ for (auto &Block : MBBs) {
+ skipNonTerminators(Position, Block);
+ for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) {
assert(Position.Address <= TI->Address &&
"Addresses shouldn't go forwards");
if (mustRelaxBranch(*TI, Position.Address))
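The two range constants above bound what a 16-bit, halfword-scaled PC-relative displacement can reach; mustRelaxBranch compares each terminator against them under worst-case address assumptions. A minimal standalone sketch of that test (inShortRange is an illustrative helper, not part of the pass):

#include <cstdint>

static const uint64_t MaxBackwardRange = 0x10000;
static const uint64_t MaxForwardRange = 0xfffe;

// True if a short (16-bit displacement) branch at Address can reach TargetAddress.
static bool inShortRange(uint64_t Address, uint64_t TargetAddress) {
  return TargetAddress >= Address
             ? TargetAddress - Address <= MaxForwardRange
             : Address - TargetAddress <= MaxBackwardRange;
}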
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
index ff9a6c0a221f..df561e2d8002 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp
@@ -9,9 +9,9 @@
#include "SystemZMCInstLower.h"
#include "SystemZAsmPrinter.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/Target/Mangler.h"
using namespace llvm;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
index f6d5ac8c285d..90447ffe90f0 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMCInstLower.h
@@ -11,8 +11,8 @@
#define LLVM_SYSTEMZMCINSTLOWER_H
#include "llvm/MC/MCExpr.h"
-#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
namespace llvm {
class MCInst;
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
index 845291f4f3fa..50865f135beb 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZMachineFunctionInfo.h
@@ -63,6 +63,6 @@ public:
void setManipulatesSP(bool MSP) { ManipulatesSP = MSP; }
};
-} // end llvm namespace
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
index 3ad146c57d92..7be81dca727b 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperands.td
@@ -202,21 +202,6 @@ def S32Imm : ImmediateAsmOperand<"S32Imm">;
def U32Imm : ImmediateAsmOperand<"U32Imm">;
//===----------------------------------------------------------------------===//
-// 8-bit immediates
-//===----------------------------------------------------------------------===//
-
-def uimm8zx4 : Immediate<i8, [{
- return isUInt<4>(N->getZExtValue());
-}], NOOP_SDNodeXForm, "U4Imm">;
-
-def uimm8zx6 : Immediate<i8, [{
- return isUInt<6>(N->getZExtValue());
-}], NOOP_SDNodeXForm, "U6Imm">;
-
-def simm8 : Immediate<i8, [{}], SIMM8, "S8Imm">;
-def uimm8 : Immediate<i8, [{}], UIMM8, "U8Imm">;
-
-//===----------------------------------------------------------------------===//
// i32 immediates
//===----------------------------------------------------------------------===//
@@ -241,6 +226,14 @@ def imm32lh16c : Immediate<i32, [{
}], LH16, "U16Imm">;
// Short immediates
+def imm32zx4 : Immediate<i32, [{
+ return isUInt<4>(N->getZExtValue());
+}], NOOP_SDNodeXForm, "U4Imm">;
+
+def imm32zx6 : Immediate<i32, [{
+ return isUInt<6>(N->getZExtValue());
+}], NOOP_SDNodeXForm, "U6Imm">;
+
def imm32sx8 : Immediate<i32, [{
return isInt<8>(N->getSExtValue());
}], SIMM8, "S8Imm">;
@@ -470,13 +463,13 @@ def AccessReg : AsmOperandClass {
let Name = "AccessReg";
let ParserMethod = "parseAccessReg";
}
-def access_reg : Immediate<i8, [{ return N->getZExtValue() < 16; }],
+def access_reg : Immediate<i32, [{ return N->getZExtValue() < 16; }],
NOOP_SDNodeXForm, "AccessReg"> {
let ParserMatchClass = AccessReg;
}
// A 4-bit condition-code mask.
-def cond4 : PatLeaf<(i8 imm), [{ return (N->getZExtValue() < 16); }]>,
- Operand<i8> {
+def cond4 : PatLeaf<(i32 imm), [{ return (N->getZExtValue() < 16); }]>,
+ Operand<i32> {
let PrintMethod = "printCond4Operand";
}
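For context on the cond4 operand above: a SystemZ condition-code mask is a 4-bit value in which bit 8 selects CC 0, bit 4 CC 1, bit 2 CC 2 and bit 1 CC 3, which is why the predicate only needs to check getZExtValue() < 16. A small illustration (ccMaskSelects is a made-up helper, not backend API):

// Assumes Mask < 16 and CC < 4.
static bool ccMaskSelects(unsigned Mask, unsigned CC) {
  return (Mask & (8u >> CC)) != 0;
}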
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
index 31cabaa3413f..c70e662db427 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZOperators.td
@@ -19,14 +19,14 @@ def SDT_ZICmp : SDTypeProfile<0, 3,
[SDTCisSameAs<0, 1>,
SDTCisVT<2, i32>]>;
def SDT_ZBRCCMask : SDTypeProfile<0, 3,
- [SDTCisVT<0, i8>,
- SDTCisVT<1, i8>,
+ [SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>,
SDTCisVT<2, OtherVT>]>;
def SDT_ZSelectCCMask : SDTypeProfile<1, 4,
[SDTCisSameAs<0, 1>,
SDTCisSameAs<1, 2>,
- SDTCisVT<3, i8>,
- SDTCisVT<4, i8>]>;
+ SDTCisVT<3, i32>,
+ SDTCisVT<4, i32>]>;
def SDT_ZWrapPtr : SDTypeProfile<1, 1,
[SDTCisSameAs<0, 1>,
SDTCisPtrTy<0>]>;
@@ -37,7 +37,7 @@ def SDT_ZWrapOffset : SDTypeProfile<1, 2,
def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
def SDT_ZExtractAccess : SDTypeProfile<1, 1,
[SDTCisVT<0, i32>,
- SDTCisVT<1, i8>]>;
+ SDTCisVT<1, i32>]>;
def SDT_ZGR128Binary32 : SDTypeProfile<1, 2,
[SDTCisVT<0, untyped>,
SDTCisVT<1, untyped>,
@@ -77,7 +77,7 @@ def SDT_ZString : SDTypeProfile<1, 3,
SDTCisVT<3, i32>]>;
def SDT_ZI32Intrinsic : SDTypeProfile<1, 0, [SDTCisVT<0, i32>]>;
def SDT_ZPrefetch : SDTypeProfile<0, 2,
- [SDTCisVT<0, i8>,
+ [SDTCisVT<0, i32>,
SDTCisPtrTy<1>]>;
//===----------------------------------------------------------------------===//
@@ -103,6 +103,7 @@ def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
SDT_ZWrapOffset, []>;
+def z_iabs : SDNode<"SystemZISD::IABS", SDTIntUnaryOp, []>;
def z_icmp : SDNode<"SystemZISD::ICMP", SDT_ZICmp, [SDNPOutGlue]>;
def z_fcmp : SDNode<"SystemZISD::FCMP", SDT_ZCmp, [SDNPOutGlue]>;
def z_tm : SDNode<"SystemZISD::TM", SDT_ZICmp, [SDNPOutGlue]>;
@@ -119,6 +120,9 @@ def z_sdivrem64 : SDNode<"SystemZISD::SDIVREM64", SDT_ZGR128Binary64>;
def z_udivrem32 : SDNode<"SystemZISD::UDIVREM32", SDT_ZGR128Binary32>;
def z_udivrem64 : SDNode<"SystemZISD::UDIVREM64", SDT_ZGR128Binary64>;
+def z_serialize : SDNode<"SystemZISD::SERIALIZE", SDTNone,
+ [SDNPHasChain, SDNPMayStore]>;
+
class AtomicWOp<string name, SDTypeProfile profile = SDT_ZAtomicLoadBinaryW>
: SDNode<"SystemZISD::"##name, profile,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
@@ -247,7 +251,7 @@ def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
// Aligned loads.
class AlignedLoad<SDPatternOperator load>
: PatFrag<(ops node:$addr), (load node:$addr), [{
- LoadSDNode *Load = cast<LoadSDNode>(N);
+ auto *Load = cast<LoadSDNode>(N);
return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
}]>;
def aligned_load : AlignedLoad<load>;
@@ -259,7 +263,7 @@ def aligned_azextloadi32 : AlignedLoad<azextloadi32>;
// Aligned stores.
class AlignedStore<SDPatternOperator store>
: PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
- StoreSDNode *Store = cast<StoreSDNode>(N);
+ auto *Store = cast<StoreSDNode>(N);
return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
}]>;
def aligned_store : AlignedStore<store>;
@@ -270,7 +274,7 @@ def aligned_truncstorei32 : AlignedStore<truncstorei32>;
// location multiple times.
class NonvolatileLoad<SDPatternOperator load>
: PatFrag<(ops node:$addr), (load node:$addr), [{
- LoadSDNode *Load = cast<LoadSDNode>(N);
+ auto *Load = cast<LoadSDNode>(N);
return !Load->isVolatile();
}]>;
def nonvolatile_load : NonvolatileLoad<load>;
@@ -281,7 +285,7 @@ def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
// Non-volatile stores.
class NonvolatileStore<SDPatternOperator store>
: PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
- StoreSDNode *Store = cast<StoreSDNode>(N);
+ auto *Store = cast<StoreSDNode>(N);
return !Store->isVolatile();
}]>;
def nonvolatile_store : NonvolatileStore<store>;
@@ -346,6 +350,9 @@ def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
APInt::getLowBitsSet(BitWidth, 8));
}]>;
+// Negative integer absolute.
+def z_inegabs : PatFrag<(ops node:$src), (ineg (z_iabs node:$src))>;
+
// Integer absolute, matching the canonical form generated by DAGCombiner.
def z_iabs32 : PatFrag<(ops node:$src),
(xor (add node:$src, (sra node:$src, (i32 31))),
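The z_iabs32 fragment above matches the shift-add-xor form into which DAGCombiner canonicalizes integer absolute value. A short sketch of why that expression computes abs (illustrative only; INT32_MIN wraps as usual):

#include <cstdint>

static int32_t absViaSraAddXor(int32_t x) {
  int32_t m = x >> 31;  // arithmetic shift: 0 if x >= 0, -1 if x < 0
  return (x + m) ^ m;   // x when m == 0, -x when m == -1
}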
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
index 7706351e54b3..e307f8a888ee 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZPatterns.td
@@ -101,15 +101,15 @@ multiclass CondStores64<Instruction insn, Instruction insninv,
SDPatternOperator store, SDPatternOperator load,
AddressingMode mode> {
def : Pat<(store (z_select_ccmask GR64:$new, (load mode:$addr),
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr),
(insn (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
- uimm8zx4:$valid, uimm8zx4:$cc)>;
+ imm32zx4:$valid, imm32zx4:$cc)>;
def : Pat<(store (z_select_ccmask (load mode:$addr), GR64:$new,
- uimm8zx4:$valid, uimm8zx4:$cc),
+ imm32zx4:$valid, imm32zx4:$cc),
mode:$addr),
(insninv (EXTRACT_SUBREG GR64:$new, subreg_l32), mode:$addr,
- uimm8zx4:$valid, uimm8zx4:$cc)>;
+ imm32zx4:$valid, imm32zx4:$cc)>;
}
// Try to use MVC instruction INSN for a load of type LOAD followed by a store
@@ -148,5 +148,8 @@ multiclass BlockLoadStore<SDPatternOperator load, ValueType vt,
// Record that INSN is a LOAD AND TEST that can be used to compare
// registers in CLS against zero. The instruction has separate R1 and R2
// operands, but they must be the same when the instruction is used like this.
-class CompareZeroFP<Instruction insn, RegisterOperand cls>
- : Pat<(z_fcmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>;
+multiclass CompareZeroFP<Instruction insn, RegisterOperand cls> {
+ def : Pat<(z_fcmp cls:$reg, (fpimm0)), (insn cls:$reg, cls:$reg)>;
+ // The sign of the zero makes no difference.
+ def : Pat<(z_fcmp cls:$reg, (fpimmneg0)), (insn cls:$reg, cls:$reg)>;
+}
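The extra fpimmneg0 pattern added above is safe because IEEE-754 comparisons treat the two signed zeros as equal, so a load-and-test against zero sets the same condition code either way:

static_assert(0.0 == -0.0, "signed zeros compare equal");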
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
index f241fb0c2222..e6b58f17b0e6 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZProcessors.td
@@ -16,6 +16,9 @@ class SystemZFeature<string extname, string intname, string desc>
AssemblerPredicate<"Feature"##intname, extname>,
SubtargetFeature<extname, "Has"##intname, "true", desc>;
+class SystemZMissingFeature<string intname>
+ : Predicate<"!Subtarget.has"##intname##"()">;
+
def FeatureDistinctOps : SystemZFeature<
"distinct-ops", "DistinctOps",
"Assume that the distinct-operands facility is installed"
@@ -36,11 +39,24 @@ def FeatureFPExtension : SystemZFeature<
"Assume that the floating-point extension facility is installed"
>;
+def FeatureFastSerialization : SystemZFeature<
+ "fast-serialization", "FastSerialization",
+ "Assume that the fast-serialization facility is installed"
+>;
+
+def FeatureInterlockedAccess1 : SystemZFeature<
+ "interlocked-access1", "InterlockedAccess1",
+ "Assume that interlocked-access facility 1 is installed"
+>;
+def FeatureNoInterlockedAccess1 : SystemZMissingFeature<"InterlockedAccess1">;
+
def : Processor<"generic", NoItineraries, []>;
def : Processor<"z10", NoItineraries, []>;
def : Processor<"z196", NoItineraries,
[FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
- FeatureFPExtension]>;
+ FeatureFPExtension, FeatureFastSerialization,
+ FeatureInterlockedAccess1]>;
def : Processor<"zEC12", NoItineraries,
[FeatureDistinctOps, FeatureLoadStoreOnCond, FeatureHighWord,
- FeatureFPExtension]>;
+ FeatureFPExtension, FeatureFastSerialization,
+ FeatureInterlockedAccess1]>;
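With these definitions the z196 and zEC12 processor models now imply the two new facilities, and the facilities should also be selectable individually through the usual feature flags derived from the extname strings above (for example -mcpu=z196, or -mattr=+fast-serialization,+interlocked-access1 with llc), assuming the standard subtarget-feature plumbing.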
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
index b61ae88f733c..f03bcc412d51 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.cpp
@@ -7,31 +7,29 @@
//
//===----------------------------------------------------------------------===//
+#include "SystemZInstrInfo.h"
#include "SystemZRegisterInfo.h"
-#include "SystemZTargetMachine.h"
+#include "SystemZSubtarget.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetFrameLowering.h"
+
+using namespace llvm;
#define GET_REGINFO_TARGET_DESC
#include "SystemZGenRegisterInfo.inc"
-using namespace llvm;
-
-SystemZRegisterInfo::SystemZRegisterInfo(SystemZTargetMachine &tm)
- : SystemZGenRegisterInfo(SystemZ::R14D), TM(tm) {}
+SystemZRegisterInfo::SystemZRegisterInfo()
+ : SystemZGenRegisterInfo(SystemZ::R14D) {}
-const uint16_t*
+const MCPhysReg *
SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
- static const uint16_t CalleeSavedRegs[] = {
- SystemZ::R6D, SystemZ::R7D, SystemZ::R8D, SystemZ::R9D,
- SystemZ::R10D, SystemZ::R11D, SystemZ::R12D, SystemZ::R13D,
- SystemZ::R14D, SystemZ::R15D,
- SystemZ::F8D, SystemZ::F9D, SystemZ::F10D, SystemZ::F11D,
- SystemZ::F12D, SystemZ::F13D, SystemZ::F14D, SystemZ::F15D,
- 0
- };
-
- return CalleeSavedRegs;
+ return CSR_SystemZ_SaveList;
+}
+
+const uint32_t *
+SystemZRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
+ return CSR_SystemZ_RegMask;
}
BitVector
@@ -63,8 +61,8 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
MachineBasicBlock &MBB = *MI->getParent();
MachineFunction &MF = *MBB.getParent();
- const SystemZInstrInfo &TII =
- *static_cast<const SystemZInstrInfo*>(TM.getInstrInfo());
+ auto *TII =
+ static_cast<const SystemZInstrInfo *>(MF.getTarget().getInstrInfo());
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
DebugLoc DL = MI->getDebugLoc();
@@ -84,7 +82,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
// See if the offset is in range, or if an equivalent instruction that
// accepts the offset exists.
unsigned Opcode = MI->getOpcode();
- unsigned OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+ unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
if (OpcodeForOffset)
MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
else {
@@ -94,7 +92,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
int64_t Mask = 0xffff;
do {
Offset = OldOffset & Mask;
- OpcodeForOffset = TII.getOpcodeForOffset(Opcode, Offset);
+ OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
Mask >>= 1;
assert(Mask && "One offset must be OK");
} while (!OpcodeForOffset);
@@ -107,21 +105,21 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
&& MI->getOperand(FIOperandNum + 2).getReg() == 0) {
// Load the offset into the scratch register and use it as an index.
// The scratch register then dies here.
- TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
+ TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
false, false, true);
} else {
// Load the anchor address into a scratch register.
- unsigned LAOpcode = TII.getOpcodeForOffset(SystemZ::LA, HighOffset);
+ unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
if (LAOpcode)
- BuildMI(MBB, MI, DL, TII.get(LAOpcode),ScratchReg)
+ BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
.addReg(BasePtr).addImm(HighOffset).addReg(0);
else {
// Load the high offset into the scratch register and use it as
// an index.
- TII.loadImmediate(MBB, MI, ScratchReg, HighOffset);
- BuildMI(MBB, MI, DL, TII.get(SystemZ::AGR),ScratchReg)
+ TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
+ BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR),ScratchReg)
.addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
}
@@ -130,7 +128,7 @@ SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
false, false, true);
}
}
- MI->setDesc(TII.get(OpcodeForOffset));
+ MI->setDesc(TII->get(OpcodeForOffset));
MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
index 13f45faba076..9bffa467a15d 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.h
@@ -19,48 +19,38 @@
namespace llvm {
namespace SystemZ {
- // Return the subreg to use for referring to the even and odd registers
- // in a GR128 pair. Is32Bit says whether we want a GR32 or GR64.
- inline unsigned even128(bool Is32bit) {
- return Is32bit ? subreg_hl32 : subreg_h64;
- }
- inline unsigned odd128(bool Is32bit) {
- return Is32bit ? subreg_l32 : subreg_l64;
- }
+// Return the subreg to use for referring to the even and odd registers
+// in a GR128 pair. Is32Bit says whether we want a GR32 or GR64.
+inline unsigned even128(bool Is32bit) {
+ return Is32bit ? subreg_hl32 : subreg_h64;
}
-
-class SystemZSubtarget;
-class SystemZInstrInfo;
+inline unsigned odd128(bool Is32bit) {
+ return Is32bit ? subreg_l32 : subreg_l64;
+}
+} // end namespace SystemZ
struct SystemZRegisterInfo : public SystemZGenRegisterInfo {
-private:
- SystemZTargetMachine &TM;
-
public:
- SystemZRegisterInfo(SystemZTargetMachine &tm);
+ SystemZRegisterInfo();
// Override TargetRegisterInfo.h.
- virtual bool requiresRegisterScavenging(const MachineFunction &MF) const
- LLVM_OVERRIDE {
+ bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
}
- virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const
- LLVM_OVERRIDE {
+ bool requiresFrameIndexScavenging(const MachineFunction &MF) const override {
return true;
}
- virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const
- LLVM_OVERRIDE {
+ bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override {
return true;
}
- virtual const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0)
- const LLVM_OVERRIDE;
- virtual BitVector getReservedRegs(const MachineFunction &MF)
- const LLVM_OVERRIDE;
- virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
- int SPAdj, unsigned FIOperandNum,
- RegScavenger *RS) const LLVM_OVERRIDE;
- virtual unsigned getFrameRegister(const MachineFunction &MF) const
- LLVM_OVERRIDE;
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF = nullptr) const
+ override;
+ const uint32_t *getCallPreservedMask(CallingConv::ID CC) const override;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const override;
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
index 93d7c8375b3d..47ac20dae78a 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZRegisterInfo.td
@@ -119,6 +119,29 @@ defm ADDR128 : SystemZRegClass<"ADDR128", untyped, 128, (sub GR128Bit, R0Q)>;
// Floating-point registers
//===----------------------------------------------------------------------===//
+// Maps FPR register numbers to their DWARF encoding.
+class DwarfMapping<int id> { int Id = id; }
+
+def F0Dwarf : DwarfMapping<16>;
+def F2Dwarf : DwarfMapping<17>;
+def F4Dwarf : DwarfMapping<18>;
+def F6Dwarf : DwarfMapping<19>;
+
+def F1Dwarf : DwarfMapping<20>;
+def F3Dwarf : DwarfMapping<21>;
+def F5Dwarf : DwarfMapping<22>;
+def F7Dwarf : DwarfMapping<23>;
+
+def F8Dwarf : DwarfMapping<24>;
+def F10Dwarf : DwarfMapping<25>;
+def F12Dwarf : DwarfMapping<26>;
+def F14Dwarf : DwarfMapping<27>;
+
+def F9Dwarf : DwarfMapping<28>;
+def F11Dwarf : DwarfMapping<29>;
+def F13Dwarf : DwarfMapping<30>;
+def F15Dwarf : DwarfMapping<31>;
+
// Lower 32 bits of one of the 16 64-bit floating-point registers
class FPR32<bits<16> num, string n> : SystemZReg<n> {
let HWEncoding = num;
@@ -142,7 +165,7 @@ class FPR128<bits<16> num, string n, FPR64 low, FPR64 high>
foreach I = 0-15 in {
def F#I#S : FPR32<I, "f"#I>;
def F#I#D : FPR64<I, "f"#I, !cast<FPR32>("F"#I#"S")>,
- DwarfRegNum<[!add(I, 16)]>;
+ DwarfRegNum<[!cast<DwarfMapping>("F"#I#"Dwarf").Id]>;
}
foreach I = [0, 1, 4, 5, 8, 9, 12, 13] in {
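The DwarfMapping classes above exist because the s390x DWARF numbers for the FPRs are not simply "register number plus 16"; the even and odd registers are interleaved. Flattened into a plain array indexed by FPR number, the same mapping reads:

static const unsigned FPRDwarfNum[16] = {
    16, 20, 17, 21, 18, 22, 19, 23,  // f0..f7
    24, 28, 25, 29, 26, 30, 27, 31   // f8..f15
};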
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
index c7ebb5d6b4ec..a3cba64b9ed2 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.cpp
@@ -11,16 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-selectiondag-info"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
-SystemZSelectionDAGInfo::
-SystemZSelectionDAGInfo(const SystemZTargetMachine &TM)
- : TargetSelectionDAGInfo(TM) {
-}
+#define DEBUG_TYPE "systemz-selectiondag-info"
+
+SystemZSelectionDAGInfo::SystemZSelectionDAGInfo(const DataLayout &DL)
+ : TargetSelectionDAGInfo(&DL) {}
SystemZSelectionDAGInfo::~SystemZSelectionDAGInfo() {
}
@@ -62,7 +61,7 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
if (IsVolatile)
return SDValue();
- if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size))
+ if (auto *CSize = dyn_cast<ConstantSDNode>(Size))
return emitMemMem(DAG, DL, SystemZISD::MVC, SystemZISD::MVC_LOOP,
Chain, Dst, Src, CSize->getZExtValue());
return SDValue();
@@ -93,11 +92,11 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
if (IsVolatile)
return SDValue();
- if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
if (Bytes == 0)
return SDValue();
- if (ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte)) {
+ if (auto *CByte = dyn_cast<ConstantSDNode>(Byte)) {
// Handle cases that can be done using at most two of
// MVI, MVHI, MVHHI and MVGHI. The latter two can only be
 // used if ByteVal is all zeros or all ones; in other cases,
@@ -137,7 +136,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
assert(Bytes >= 2 && "Should have dealt with 0- and 1-byte cases already");
// Handle the special case of a memset of 0, which can use XC.
- ConstantSDNode *CByte = dyn_cast<ConstantSDNode>(Byte);
+ auto *CByte = dyn_cast<ConstantSDNode>(Byte);
if (CByte && CByte->getZExtValue() == 0)
return emitMemMem(DAG, DL, SystemZISD::XC, SystemZISD::XC_LOOP,
Chain, Dst, Dst, Bytes);
@@ -194,7 +193,7 @@ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src1, SDValue Src2, SDValue Size,
MachinePointerInfo Op1PtrInfo,
MachinePointerInfo Op2PtrInfo) const {
- if (ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(Size)) {
+ if (auto *CSize = dyn_cast<ConstantSDNode>(Size)) {
uint64_t Bytes = CSize->getZExtValue();
assert(Bytes > 0 && "Caller should have handled 0-size case");
Chain = emitCLC(DAG, DL, Chain, Src1, Src2, Bytes);
@@ -230,7 +229,7 @@ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
Ops.push_back(DAG.getConstant(SystemZ::CCMASK_SRST_FOUND, MVT::i32));
Ops.push_back(Glue);
VTs = DAG.getVTList(PtrVT, MVT::Glue);
- End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, &Ops[0], Ops.size());
+ End = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VTs, Ops);
return std::make_pair(End, Chain);
}
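One piece of background for the memset-of-zero case above: XC (exclusive-OR characters) of a memory range with itself clears it, so a zero memset can be emitted as a single XC or an XC loop instead of a store sequence. The identity it relies on, in portable form (clearViaXor is purely illustrative):

#include <cstdint>

static void clearViaXor(uint8_t *p, unsigned n) {
  for (unsigned i = 0; i != n; ++i)
    p[i] ^= p[i];  // x ^ x == 0, so every byte becomes zero
}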
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
index 281d1e291dc9..e9de146af1d6 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSelectionDAGInfo.h
@@ -22,59 +22,56 @@ class SystemZTargetMachine;
class SystemZSelectionDAGInfo : public TargetSelectionDAGInfo {
public:
- explicit SystemZSelectionDAGInfo(const SystemZTargetMachine &TM);
+ explicit SystemZSelectionDAGInfo(const DataLayout &DL);
~SystemZSelectionDAGInfo();
- virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool IsVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
- MachinePointerInfo SrcPtrInfo) const
- LLVM_OVERRIDE;
+ MachinePointerInfo SrcPtrInfo) const override;
- virtual SDValue
- EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
- SDValue Chain, SDValue Dst, SDValue Byte,
- SDValue Size, unsigned Align, bool IsVolatile,
- MachinePointerInfo DstPtrInfo) const LLVM_OVERRIDE;
+ SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc DL,
+ SDValue Chain, SDValue Dst, SDValue Byte,
+ SDValue Size, unsigned Align, bool IsVolatile,
+ MachinePointerInfo DstPtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src1, SDValue Src2, SDValue Size,
MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo Op2PtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, SDValue Char, SDValue Length,
- MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo SrcPtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Dest, SDValue Src,
MachinePointerInfo DestPtrInfo,
MachinePointerInfo SrcPtrInfo,
- bool isStpcpy) const LLVM_OVERRIDE;
+ bool isStpcpy) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src1, SDValue Src2,
MachinePointerInfo Op1PtrInfo,
- MachinePointerInfo Op2PtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo Op2PtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
- SDValue Src, MachinePointerInfo SrcPtrInfo) const
- LLVM_OVERRIDE;
+ SDValue Src,
+ MachinePointerInfo SrcPtrInfo) const override;
- virtual std::pair<SDValue, SDValue>
+ std::pair<SDValue, SDValue>
EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
SDValue Src, SDValue MaxLength,
- MachinePointerInfo SrcPtrInfo) const LLVM_OVERRIDE;
+ MachinePointerInfo SrcPtrInfo) const override;
};
-}
+} // end namespace llvm
#endif
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
index 537a54554045..aad899c41c0f 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -13,47 +13,47 @@
//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "systemz-shorten-inst"
-
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;
+#define DEBUG_TYPE "systemz-shorten-inst"
+
namespace {
- class SystemZShortenInst : public MachineFunctionPass {
- public:
- static char ID;
- SystemZShortenInst(const SystemZTargetMachine &tm);
+class SystemZShortenInst : public MachineFunctionPass {
+public:
+ static char ID;
+ SystemZShortenInst(const SystemZTargetMachine &tm);
- virtual const char *getPassName() const {
- return "SystemZ Instruction Shortening";
- }
+ const char *getPassName() const override {
+ return "SystemZ Instruction Shortening";
+ }
- bool processBlock(MachineBasicBlock *MBB);
- bool runOnMachineFunction(MachineFunction &F);
+ bool processBlock(MachineBasicBlock &MBB);
+ bool runOnMachineFunction(MachineFunction &F) override;
- private:
- bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
- unsigned LLIxL, unsigned LLIxH);
+private:
+ bool shortenIIF(MachineInstr &MI, unsigned *GPRMap, unsigned LiveOther,
+ unsigned LLIxL, unsigned LLIxH);
- const SystemZInstrInfo *TII;
+ const SystemZInstrInfo *TII;
- // LowGPRs[I] has bit N set if LLVM register I includes the low
- // word of GPR N. HighGPRs is the same for the high word.
- unsigned LowGPRs[SystemZ::NUM_TARGET_REGS];
- unsigned HighGPRs[SystemZ::NUM_TARGET_REGS];
- };
+ // LowGPRs[I] has bit N set if LLVM register I includes the low
+ // word of GPR N. HighGPRs is the same for the high word.
+ unsigned LowGPRs[SystemZ::NUM_TARGET_REGS];
+ unsigned HighGPRs[SystemZ::NUM_TARGET_REGS];
+};
- char SystemZShortenInst::ID = 0;
-} // end of anonymous namespace
+char SystemZShortenInst::ID = 0;
+} // end anonymous namespace
FunctionPass *llvm::createSystemZShortenInstPass(SystemZTargetMachine &TM) {
return new SystemZShortenInst(TM);
}
SystemZShortenInst::SystemZShortenInst(const SystemZTargetMachine &tm)
- : MachineFunctionPass(ID), TII(0), LowGPRs(), HighGPRs() {
+ : MachineFunctionPass(ID), TII(nullptr), LowGPRs(), HighGPRs() {
// Set up LowGPRs and HighGPRs.
for (unsigned I = 0; I < 16; ++I) {
LowGPRs[SystemZMC::GR32Regs[I]] |= 1 << I;
@@ -98,16 +98,15 @@ bool SystemZShortenInst::shortenIIF(MachineInstr &MI, unsigned *GPRMap,
}
// Process all instructions in MBB. Return true if something changed.
-bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
+bool SystemZShortenInst::processBlock(MachineBasicBlock &MBB) {
bool Changed = false;
// Work out which words are live on exit from the block.
unsigned LiveLow = 0;
unsigned LiveHigh = 0;
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI) {
- for (MachineBasicBlock::livein_iterator LI = (*SI)->livein_begin(),
- LE = (*SI)->livein_end(); LI != LE; ++LI) {
+ for (auto SI = MBB.succ_begin(), SE = MBB.succ_end(); SI != SE; ++SI) {
+ for (auto LI = (*SI)->livein_begin(), LE = (*SI)->livein_end();
+ LI != LE; ++LI) {
unsigned Reg = *LI;
assert(Reg < SystemZ::NUM_TARGET_REGS && "Invalid register number");
LiveLow |= LowGPRs[Reg];
@@ -116,8 +115,7 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
}
// Iterate backwards through the block looking for instructions to change.
- for (MachineBasicBlock::reverse_iterator MBBI = MBB->rbegin(),
- MBBE = MBB->rend(); MBBI != MBBE; ++MBBI) {
+ for (auto MBBI = MBB.rbegin(), MBBE = MBB.rend(); MBBI != MBBE; ++MBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
if (Opcode == SystemZ::IILF)
@@ -128,8 +126,8 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
SystemZ::LLIHH);
unsigned UsedLow = 0;
unsigned UsedHigh = 0;
- for (MachineInstr::mop_iterator MOI = MI.operands_begin(),
- MOE = MI.operands_end(); MOI != MOE; ++MOI) {
+ for (auto MOI = MI.operands_begin(), MOE = MI.operands_end();
+ MOI != MOE; ++MOI) {
MachineOperand &MO = *MOI;
if (MO.isReg()) {
if (unsigned Reg = MO.getReg()) {
@@ -155,9 +153,8 @@ bool SystemZShortenInst::runOnMachineFunction(MachineFunction &F) {
TII = static_cast<const SystemZInstrInfo *>(F.getTarget().getInstrInfo());
bool Changed = false;
- for (MachineFunction::iterator MFI = F.begin(), MFE = F.end();
- MFI != MFE; ++MFI)
- Changed |= processBlock(MFI);
+ for (auto &MBB : F)
+ Changed |= processBlock(MBB);
return Changed;
}
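As the shortenIIF signature above suggests, the pass tracks which GPR low and high words are live out of each block so that a 6-byte IILF/IIHF is only rewritten to a 4-byte load-logical-immediate when the word it would implicitly clear is dead. A minimal sketch of that per-word bookkeeping (WordLiveness is a made-up illustration, not the pass's data structure):

#include <cstdint>

// One bit per GPR: bit I set means that word of GPR I is live.
struct WordLiveness {
  uint32_t LiveLow = 0, LiveHigh = 0;
  void markLow(unsigned GPR) { LiveLow |= 1u << GPR; }
  void markHigh(unsigned GPR) { LiveHigh |= 1u << GPR; }
  bool highWordDead(unsigned GPR) const { return !(LiveHigh & (1u << GPR)); }
};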
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
index 3971d5e2a5fa..e160bc86f225 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.cpp
@@ -8,25 +8,23 @@
//===----------------------------------------------------------------------===//
#include "SystemZSubtarget.h"
+#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Host.h"
-#include "MCTargetDesc/SystemZMCTargetDesc.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "systemz-subtarget"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "SystemZGenSubtargetInfo.inc"
-using namespace llvm;
-
-// Pin the vtabel to this file.
+// Pin the vtable to this file.
void SystemZSubtarget::anchor() {}
-SystemZSubtarget::SystemZSubtarget(const std::string &TT,
- const std::string &CPU,
- const std::string &FS)
- : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
- HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
- TargetTriple(TT) {
+SystemZSubtarget &
+SystemZSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
std::string CPUName = CPU;
if (CPUName.empty())
CPUName = "generic";
@@ -34,11 +32,26 @@ SystemZSubtarget::SystemZSubtarget(const std::string &TT,
if (CPUName == "generic")
CPUName = sys::getHostCPUName();
#endif
-
// Parse features string.
ParseSubtargetFeatures(CPUName, FS);
+ return *this;
}
+SystemZSubtarget::SystemZSubtarget(const std::string &TT,
+ const std::string &CPU,
+ const std::string &FS,
+ const TargetMachine &TM)
+ : SystemZGenSubtargetInfo(TT, CPU, FS), HasDistinctOps(false),
+ HasLoadStoreOnCond(false), HasHighWord(false), HasFPExtension(false),
+ HasFastSerialization(false), HasInterlockedAccess1(false),
+ TargetTriple(TT),
+ // Make sure that global data has at least 16 bits of alignment by
+ // default, so that we can refer to it using LARL. We don't have any
+ // special requirements for stack variables though.
+ DL("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM),
+ TSInfo(DL), FrameLowering() {}
+
// Return true if GV binds locally under reloc model RM.
static bool bindsLocally(const GlobalValue *GV, Reloc::Model RM) {
// For non-PIC, all symbols bind locally.
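The DataLayout string in the new SystemZSubtarget constructor above uses standard LLVM datalayout syntax; the preferred alignments of 16 bits are what the accompanying comment means by making global data LARL-addressable by default. Field by field, to the best of my reading of the datalayout grammar:

// "E"        big-endian
// "m:e"      ELF-style symbol mangling
// "i1:8:16"  i1: ABI alignment 8 bits, preferred 16
// "i8:8:16"  i8: ABI alignment 8 bits, preferred 16
// "i64:64"   i64 aligned to 64 bits
// "f128:64"  fp128 needs only 64-bit alignment
// "a:8:16"   aggregates: ABI alignment 8 bits, preferred 16
// "n32:64"   native integer widths are 32 and 64 bits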
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
index 5817491d4585..4e8c710bdefd 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZSubtarget.h
@@ -14,6 +14,12 @@
#ifndef SYSTEMZSUBTARGET_H
#define SYSTEMZSUBTARGET_H
+#include "SystemZFrameLowering.h"
+#include "SystemZISelLowering.h"
+#include "SystemZInstrInfo.h"
+#include "SystemZRegisterInfo.h"
+#include "SystemZSelectionDAGInfo.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <string>
@@ -32,16 +38,34 @@ protected:
bool HasLoadStoreOnCond;
bool HasHighWord;
bool HasFPExtension;
+ bool HasFastSerialization;
+ bool HasInterlockedAccess1;
private:
Triple TargetTriple;
-
+ const DataLayout DL;
+ SystemZInstrInfo InstrInfo;
+ SystemZTargetLowering TLInfo;
+ SystemZSelectionDAGInfo TSInfo;
+ SystemZFrameLowering FrameLowering;
+
+ SystemZSubtarget &initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS);
public:
SystemZSubtarget(const std::string &TT, const std::string &CPU,
- const std::string &FS);
+ const std::string &FS, const TargetMachine &TM);
+
+ const TargetFrameLowering *getFrameLowering() const { return &FrameLowering; }
+ const SystemZInstrInfo *getInstrInfo() const { return &InstrInfo; }
+ const DataLayout *getDataLayout() const { return &DL; }
+ const SystemZRegisterInfo *getRegisterInfo() const {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const SystemZTargetLowering *getTargetLowering() const { return &TLInfo; }
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; }
// This is important for reducing register pressure in vector code.
- virtual bool useAA() const LLVM_OVERRIDE { return true; }
+ bool useAA() const override { return true; }
// Automatically generated by tblgen.
void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
@@ -58,6 +82,12 @@ public:
// Return true if the target has the floating-point extension facility.
bool hasFPExtension() const { return HasFPExtension; }
+ // Return true if the target has the fast-serialization facility.
+ bool hasFastSerialization() const { return HasFastSerialization; }
+
+ // Return true if the target has interlocked-access facility 1.
+ bool hasInterlockedAccess1() const { return HasInterlockedAccess1; }
+
// Return true if GV can be accessed using LARL for reloc model RM
// and code model CM.
bool isPC32DBLSymbol(const GlobalValue *GV, Reloc::Model RM,
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
index dee92e960c54..0122e99f8a77 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp
@@ -22,18 +22,10 @@ extern "C" void LLVMInitializeSystemZTarget() {
SystemZTargetMachine::SystemZTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Reloc::Model RM,
- CodeModel::Model CM,
+ Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL)
- : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
- Subtarget(TT, CPU, FS),
- // Make sure that global data has at least 16 bits of alignment by default,
- // so that we can refer to it using LARL. We don't have any special
- // requirements for stack variables though.
- DL("E-p:64:64:64-i1:8:16-i8:8:16-i16:16-i32:32-i64:64"
- "-f32:32-f64:64-f128:64-a0:8:16-n32:64"),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(*this, Subtarget) {
+ : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+ Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
}
@@ -48,10 +40,10 @@ public:
return getTM<SystemZTargetMachine>();
}
- virtual void addIRPasses() LLVM_OVERRIDE;
- virtual bool addInstSelector() LLVM_OVERRIDE;
- virtual bool addPreSched2() LLVM_OVERRIDE;
- virtual bool addPreEmitPass() LLVM_OVERRIDE;
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ bool addPreSched2() override;
+ bool addPreEmitPass() override;
};
} // end anonymous namespace
@@ -66,7 +58,8 @@ bool SystemZPassConfig::addInstSelector() {
}
bool SystemZPassConfig::addPreSched2() {
- if (getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
+ if (getOptLevel() != CodeGenOpt::None &&
+ getSystemZTargetMachine().getSubtargetImpl()->hasLoadStoreOnCond())
addPass(&IfConverterID);
return true;
}
diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
index a99a98e08477..ded07e912443 100644
--- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
+++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetMachine.h
@@ -15,25 +15,15 @@
#ifndef SYSTEMZTARGETMACHINE_H
#define SYSTEMZTARGETMACHINE_H
-#include "SystemZFrameLowering.h"
-#include "SystemZISelLowering.h"
-#include "SystemZInstrInfo.h"
-#include "SystemZRegisterInfo.h"
#include "SystemZSubtarget.h"
-#include "SystemZSelectionDAGInfo.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
namespace llvm {
+class TargetFrameLowering;
+
class SystemZTargetMachine : public LLVMTargetMachine {
SystemZSubtarget Subtarget;
- const DataLayout DL;
- SystemZInstrInfo InstrInfo;
- SystemZTargetLowering TLInfo;
- SystemZSelectionDAGInfo TSInfo;
- SystemZFrameLowering FrameLowering;
public:
SystemZTargetMachine(const Target &T, StringRef TT, StringRef CPU,
@@ -42,31 +32,30 @@ public:
CodeGenOpt::Level OL);
// Override TargetMachine.
- virtual const TargetFrameLowering *getFrameLowering() const LLVM_OVERRIDE {
- return &FrameLowering;
+ const TargetFrameLowering *getFrameLowering() const override {
+ return getSubtargetImpl()->getFrameLowering();
}
- virtual const SystemZInstrInfo *getInstrInfo() const LLVM_OVERRIDE {
- return &InstrInfo;
+ const SystemZInstrInfo *getInstrInfo() const override {
+ return getSubtargetImpl()->getInstrInfo();
}
- virtual const SystemZSubtarget *getSubtargetImpl() const LLVM_OVERRIDE {
+ const SystemZSubtarget *getSubtargetImpl() const override {
return &Subtarget;
}
- virtual const DataLayout *getDataLayout() const LLVM_OVERRIDE {
- return &DL;
+ const DataLayout *getDataLayout() const override {
+ return getSubtargetImpl()->getDataLayout();
}
- virtual const SystemZRegisterInfo *getRegisterInfo() const LLVM_OVERRIDE {
- return &InstrInfo.getRegisterInfo();
+ const SystemZRegisterInfo *getRegisterInfo() const override {
+ return getSubtargetImpl()->getRegisterInfo();
}
- virtual const SystemZTargetLowering *getTargetLowering() const LLVM_OVERRIDE {
- return &TLInfo;
+ const SystemZTargetLowering *getTargetLowering() const override {
+ return getSubtargetImpl()->getTargetLowering();
}
- virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const
- LLVM_OVERRIDE {
- return &TSInfo;
+ const TargetSelectionDAGInfo *getSelectionDAGInfo() const override {
+ return getSubtargetImpl()->getSelectionDAGInfo();
}
// Override LLVMTargetMachine
- virtual TargetPassConfig *createPassConfig(PassManagerBase &PM) LLVM_OVERRIDE;
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
};
} // end namespace llvm