Diffstat (limited to 'lib/Target/RISCV')
-rw-r--r--  lib/Target/RISCV/AsmParser/LLVMBuild.txt               |   2
-rw-r--r--  lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp          | 688
-rw-r--r--  lib/Target/RISCV/CMakeLists.txt                        |   3
-rw-r--r--  lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp    |  38
-rw-r--r--  lib/Target/RISCV/InstPrinter/LLVMBuild.txt             |   2
-rw-r--r--  lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp      |  33
-rw-r--r--  lib/Target/RISCV/InstPrinter/RISCVInstPrinter.h        |   4
-rw-r--r--  lib/Target/RISCV/LLVMBuild.txt                         |   4
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp      | 130
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h        | 113
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp     |   4
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h       |   4
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp   |   4
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp          |  83
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h            |  10
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp  |  16
-rw-r--r--  lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h    |   8
-rw-r--r--  lib/Target/RISCV/RISCV.h                               |   6
-rw-r--r--  lib/Target/RISCV/RISCV.td                              |   6
-rw-r--r--  lib/Target/RISCV/RISCVExpandPseudoInsts.cpp            | 556
-rw-r--r--  lib/Target/RISCV/RISCVFrameLowering.cpp                |   3
-rw-r--r--  lib/Target/RISCV/RISCVISelDAGToDAG.cpp                 | 126
-rw-r--r--  lib/Target/RISCV/RISCVISelLowering.cpp                 | 328
-rw-r--r--  lib/Target/RISCV/RISCVISelLowering.h                   |  24
-rw-r--r--  lib/Target/RISCV/RISCVInstrFormats.td                  |   9
-rw-r--r--  lib/Target/RISCV/RISCVInstrFormatsC.td                 |  13
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfo.cpp                    |   3
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfo.td                     | 271
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfoA.td                    | 189
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfoC.td                    |  39
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfoD.td                    |  35
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfoF.td                    |  37
-rw-r--r--  lib/Target/RISCV/RISCVInstrInfoM.td                    |  31
-rw-r--r--  lib/Target/RISCV/RISCVMergeBaseOffset.cpp              |   2
-rw-r--r--  lib/Target/RISCV/RISCVSystemOperands.td                | 352
-rw-r--r--  lib/Target/RISCV/RISCVTargetMachine.cpp                |  18
-rw-r--r--  lib/Target/RISCV/Utils/CMakeLists.txt                  |   4
-rw-r--r--  lib/Target/RISCV/Utils/LLVMBuild.txt                   |  24
-rw-r--r--  lib/Target/RISCV/Utils/RISCVBaseInfo.cpp               |   9
-rw-r--r--  lib/Target/RISCV/Utils/RISCVBaseInfo.h (renamed from lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h) | 56
-rw-r--r--  lib/Target/RISCV/Utils/RISCVMatInt.cpp                 |  79
-rw-r--r--  lib/Target/RISCV/Utils/RISCVMatInt.h                   |  36
42 files changed, 2852 insertions(+), 550 deletions(-)
diff --git a/lib/Target/RISCV/AsmParser/LLVMBuild.txt b/lib/Target/RISCV/AsmParser/LLVMBuild.txt
index a9ad92c872ea..205d1248f923 100644
--- a/lib/Target/RISCV/AsmParser/LLVMBuild.txt
+++ b/lib/Target/RISCV/AsmParser/LLVMBuild.txt
@@ -19,5 +19,5 @@
type = Library
name = RISCVAsmParser
parent = RISCV
-required_libraries = MC MCParser RISCVDesc RISCVInfo Support
+required_libraries = MC MCParser RISCVDesc RISCVInfo RISCVUtils Support
add_to_library_groups = RISCV
diff --git a/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 9a455c105482..1d1112cc5124 100644
--- a/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -7,12 +7,16 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "MCTargetDesc/RISCVAsmBackend.h"
#include "MCTargetDesc/RISCVMCExpr.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVTargetStreamer.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -39,6 +43,8 @@ namespace {
struct RISCVOperand;
class RISCVAsmParser : public MCTargetAsmParser {
+ SmallVector<FeatureBitset, 4> FeatureBitStack;
+
SMLoc getLoc() const { return getParser().getTok().getLoc(); }
bool isRV64() const { return getSTI().hasFeature(RISCV::Feature64Bit); }
@@ -73,6 +79,9 @@ class RISCVAsmParser : public MCTargetAsmParser {
// synthesize the desired immediate value into the destination register.
void emitLoadImm(unsigned DestReg, int64_t Value, MCStreamer &Out);
+ // Helper to emit pseudo instruction "lla" used in PC-rel addressing.
+ void emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);
+
/// Helper for processing MC instructions that have been successfully matched
/// by MatchAndEmitInstruction. Modifications to the emitted instructions,
/// like the expansion of pseudo instructions (e.g., "li"), can be performed
@@ -83,13 +92,16 @@ class RISCVAsmParser : public MCTargetAsmParser {
#define GET_ASSEMBLER_HEADER
#include "RISCVGenAsmMatcher.inc"
+ OperandMatchResultTy parseCSRSystemRegister(OperandVector &Operands);
OperandMatchResultTy parseImmediate(OperandVector &Operands);
OperandMatchResultTy parseRegister(OperandVector &Operands,
bool AllowParens = false);
OperandMatchResultTy parseMemOpBaseReg(OperandVector &Operands);
OperandMatchResultTy parseOperandWithModifier(OperandVector &Operands);
+ OperandMatchResultTy parseBareSymbol(OperandVector &Operands);
+ OperandMatchResultTy parseJALOffset(OperandVector &Operands);
- bool parseOperand(OperandVector &Operands, bool ForceImmediate);
+ bool parseOperand(OperandVector &Operands, StringRef Mnemonic);
bool parseDirectiveOption();
@@ -108,6 +120,21 @@ class RISCVAsmParser : public MCTargetAsmParser {
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
}
}
+
+ void pushFeatureBits() {
+ FeatureBitStack.push_back(getSTI().getFeatureBits());
+ }
+
+ bool popFeatureBits() {
+ if (FeatureBitStack.empty())
+ return true;
+
+ FeatureBitset FeatureBits = FeatureBitStack.pop_back_val();
+ copySTI().setFeatureBits(FeatureBits);
+ setAvailableFeatures(ComputeAvailableFeatures(FeatureBits));
+
+ return false;
+ }
public:
enum RISCVMatchResultTy {
Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY,
@@ -139,6 +166,7 @@ struct RISCVOperand : public MCParsedAsmOperand {
Token,
Register,
Immediate,
+ SystemRegister
} Kind;
bool IsRV64;
@@ -151,11 +179,20 @@ struct RISCVOperand : public MCParsedAsmOperand {
const MCExpr *Val;
};
+ struct SysRegOp {
+ const char *Data;
+ unsigned Length;
+ unsigned Encoding;
+ // FIXME: Add the Encoding parsed fields as needed for checks,
+ // e.g.: read/write or user/supervisor/machine privileges.
+ };
+
SMLoc StartLoc, EndLoc;
union {
StringRef Tok;
RegOp Reg;
ImmOp Imm;
+ struct SysRegOp SysReg;
};
RISCVOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
@@ -176,6 +213,9 @@ public:
case Token:
Tok = o.Tok;
break;
+ case SystemRegister:
+ SysReg = o.SysReg;
+ break;
}
}
@@ -183,19 +223,22 @@ public:
bool isReg() const override { return Kind == Register; }
bool isImm() const override { return Kind == Immediate; }
bool isMem() const override { return false; }
+ bool isSystemRegister() const { return Kind == SystemRegister; }
- bool evaluateConstantImm(int64_t &Imm, RISCVMCExpr::VariantKind &VK) const {
- const MCExpr *Val = getImm();
- bool Ret = false;
- if (auto *RE = dyn_cast<RISCVMCExpr>(Val)) {
- Ret = RE->evaluateAsConstant(Imm);
+ static bool evaluateConstantImm(const MCExpr *Expr, int64_t &Imm,
+ RISCVMCExpr::VariantKind &VK) {
+ if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
VK = RE->getKind();
- } else if (auto CE = dyn_cast<MCConstantExpr>(Val)) {
- Ret = true;
+ return RE->evaluateAsConstant(Imm);
+ }
+
+ if (auto CE = dyn_cast<MCConstantExpr>(Expr)) {
VK = RISCVMCExpr::VK_RISCV_None;
Imm = CE->getValue();
+ return true;
}
- return Ret;
+
+ return false;
}
// True if operand is a symbol with no modifiers, or a constant with no
@@ -205,7 +248,7 @@ public:
RISCVMCExpr::VariantKind VK;
if (!isImm())
return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
bool IsValid;
if (!IsConstantImm)
IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
@@ -220,12 +263,14 @@ public:
int64_t Imm;
RISCVMCExpr::VariantKind VK;
// Must be of 'immediate' type but not a constant.
- if (!isImm() || evaluateConstantImm(Imm, VK))
+ if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
return false;
return RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
+ bool isCSRSystemRegister() const { return isSystemRegister(); }
+
/// Return true if the operand is valid for the fence instruction, e.g.
/// ('iorw').
bool isFenceArg() const {
@@ -265,12 +310,14 @@ public:
return RISCVFPRndMode::stringToRoundingMode(Str) != RISCVFPRndMode::Invalid;
}
- bool isImmXLen() const {
+ bool isImmXLenLI() const {
int64_t Imm;
RISCVMCExpr::VariantKind VK;
if (!isImm())
return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (VK == RISCVMCExpr::VK_RISCV_LO || VK == RISCVMCExpr::VK_RISCV_PCREL_LO)
+ return true;
// Given only Imm, ensuring that the actually specified constant is either
// a signed or unsigned 64-bit number is unfortunately impossible.
bool IsInRange = isRV64() ? true : isInt<32>(Imm) || isUInt<32>(Imm);
@@ -282,7 +329,8 @@ public:
RISCVMCExpr::VariantKind VK;
if (!isImm())
return false;
- if (!evaluateConstantImm(Imm, VK) || VK != RISCVMCExpr::VK_RISCV_None)
+ if (!evaluateConstantImm(getImm(), Imm, VK) ||
+ VK != RISCVMCExpr::VK_RISCV_None)
return false;
return (isRV64() && isUInt<6>(Imm)) || isUInt<5>(Imm);
}
@@ -292,7 +340,8 @@ public:
RISCVMCExpr::VariantKind VK;
if (!isImm())
return false;
- if (!evaluateConstantImm(Imm, VK) || VK != RISCVMCExpr::VK_RISCV_None)
+ if (!evaluateConstantImm(getImm(), Imm, VK) ||
+ VK != RISCVMCExpr::VK_RISCV_None)
return false;
if (Imm == 0)
return false;
@@ -304,7 +353,7 @@ public:
RISCVMCExpr::VariantKind VK;
if (!isImm())
return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isUInt<5>(Imm) && VK == RISCVMCExpr::VK_RISCV_None;
}
@@ -313,66 +362,68 @@ public:
RISCVMCExpr::VariantKind VK;
if (!isImm())
return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isUInt<5>(Imm) && (Imm != 0) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
bool isSImm6() const {
+ if (!isImm())
+ return false;
RISCVMCExpr::VariantKind VK;
int64_t Imm;
- bool IsValid;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
- if (!IsConstantImm)
- IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
- else
- IsValid = isInt<6>(Imm);
- return IsValid &&
- (VK == RISCVMCExpr::VK_RISCV_None || VK == RISCVMCExpr::VK_RISCV_LO);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isInt<6>(Imm) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
}
bool isSImm6NonZero() const {
+ if (!isImm())
+ return false;
RISCVMCExpr::VariantKind VK;
int64_t Imm;
- bool IsValid;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
- if (!IsConstantImm)
- IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
- else
- IsValid = ((Imm != 0) && isInt<6>(Imm));
- return IsValid &&
- (VK == RISCVMCExpr::VK_RISCV_None || VK == RISCVMCExpr::VK_RISCV_LO);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ return IsConstantImm && isInt<6>(Imm) && (Imm != 0) &&
+ VK == RISCVMCExpr::VK_RISCV_None;
}
bool isCLUIImm() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && (Imm != 0) &&
(isUInt<5>(Imm) || (Imm >= 0xfffe0 && Imm <= 0xfffff)) &&
- VK == RISCVMCExpr::VK_RISCV_None;
+ VK == RISCVMCExpr::VK_RISCV_None;
}
bool isUImm7Lsb00() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isShiftedUInt<5, 2>(Imm) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
bool isUImm8Lsb00() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isShiftedUInt<6, 2>(Imm) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
bool isUImm8Lsb000() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isShiftedUInt<5, 3>(Imm) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
@@ -380,17 +431,21 @@ public:
bool isSImm9Lsb0() const { return isBareSimmNLsb0<9>(); }
bool isUImm9Lsb000() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isShiftedUInt<6, 3>(Imm) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
bool isUImm10Lsb00NonZero() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && isShiftedUInt<8, 2>(Imm) && (Imm != 0) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
@@ -401,54 +456,63 @@ public:
bool IsValid;
if (!isImm())
return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
if (!IsConstantImm)
IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
else
IsValid = isInt<12>(Imm);
- return IsValid && (VK == RISCVMCExpr::VK_RISCV_None ||
+ return IsValid && ((IsConstantImm && VK == RISCVMCExpr::VK_RISCV_None) ||
VK == RISCVMCExpr::VK_RISCV_LO ||
VK == RISCVMCExpr::VK_RISCV_PCREL_LO);
}
bool isSImm12Lsb0() const { return isBareSimmNLsb0<12>(); }
- bool isUImm12() const {
- int64_t Imm;
- RISCVMCExpr::VariantKind VK;
- if (!isImm())
- return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
- return IsConstantImm && isUInt<12>(Imm) && VK == RISCVMCExpr::VK_RISCV_None;
- }
-
bool isSImm13Lsb0() const { return isBareSimmNLsb0<13>(); }
bool isSImm10Lsb0000NonZero() const {
+ if (!isImm())
+ return false;
int64_t Imm;
RISCVMCExpr::VariantKind VK;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
return IsConstantImm && (Imm != 0) && isShiftedInt<6, 4>(Imm) &&
VK == RISCVMCExpr::VK_RISCV_None;
}
- bool isUImm20() const {
+ bool isUImm20LUI() const {
RISCVMCExpr::VariantKind VK;
int64_t Imm;
bool IsValid;
if (!isImm())
return false;
- bool IsConstantImm = evaluateConstantImm(Imm, VK);
- if (!IsConstantImm)
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (!IsConstantImm) {
IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
- else
- IsValid = isUInt<20>(Imm);
- return IsValid && (VK == RISCVMCExpr::VK_RISCV_None ||
- VK == RISCVMCExpr::VK_RISCV_HI ||
- VK == RISCVMCExpr::VK_RISCV_PCREL_HI);
+ return IsValid && VK == RISCVMCExpr::VK_RISCV_HI;
+ } else {
+ return isUInt<20>(Imm) && (VK == RISCVMCExpr::VK_RISCV_None ||
+ VK == RISCVMCExpr::VK_RISCV_HI);
+ }
+ }
+
+ bool isUImm20AUIPC() const {
+ RISCVMCExpr::VariantKind VK;
+ int64_t Imm;
+ bool IsValid;
+ if (!isImm())
+ return false;
+ bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
+ if (!IsConstantImm) {
+ IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK, Imm);
+ return IsValid && VK == RISCVMCExpr::VK_RISCV_PCREL_HI;
+ } else {
+ return isUInt<20>(Imm) && (VK == RISCVMCExpr::VK_RISCV_None ||
+ VK == RISCVMCExpr::VK_RISCV_PCREL_HI);
+ }
}
- bool isSImm21Lsb0() const { return isBareSimmNLsb0<21>(); }
+ bool isSImm21Lsb0JAL() const { return isBareSimmNLsb0<21>(); }
/// getStartLoc - Gets location of the first token of this operand
SMLoc getStartLoc() const override { return StartLoc; }
@@ -462,6 +526,11 @@ public:
return Reg.RegNum;
}
+ StringRef getSysReg() const {
+ assert(Kind == SystemRegister && "Invalid access!");
+ return StringRef(SysReg.Data, SysReg.Length);
+ }
+
const MCExpr *getImm() const {
assert(Kind == Immediate && "Invalid type access!");
return Imm.Val;
@@ -484,6 +553,9 @@ public:
case Token:
OS << "'" << getToken() << "'";
break;
+ case SystemRegister:
+ OS << "<sysreg: " << getSysReg() << '>';
+ break;
}
}
@@ -517,16 +589,22 @@ public:
return Op;
}
+ static std::unique_ptr<RISCVOperand>
+ createSysReg(StringRef Str, SMLoc S, unsigned Encoding, bool IsRV64) {
+ auto Op = make_unique<RISCVOperand>(SystemRegister);
+ Op->SysReg.Data = Str.data();
+ Op->SysReg.Length = Str.size();
+ Op->SysReg.Encoding = Encoding;
+ Op->StartLoc = S;
+ Op->IsRV64 = IsRV64;
+ return Op;
+ }
+
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
assert(Expr && "Expr shouldn't be null!");
int64_t Imm = 0;
- bool IsConstant = false;
- if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
- IsConstant = RE->evaluateAsConstant(Imm);
- } else if (auto *CE = dyn_cast<MCConstantExpr>(Expr)) {
- IsConstant = true;
- Imm = CE->getValue();
- }
+ RISCVMCExpr::VariantKind VK;
+ bool IsConstant = evaluateConstantImm(Expr, Imm, VK);
if (IsConstant)
Inst.addOperand(MCOperand::createImm(Imm));
@@ -553,16 +631,22 @@ public:
unsigned Imm = 0;
for (char c : SE->getSymbol().getName()) {
switch (c) {
- default: llvm_unreachable("FenceArg must contain only [iorw]");
- case 'i': Imm |= RISCVFenceField::I; break;
- case 'o': Imm |= RISCVFenceField::O; break;
- case 'r': Imm |= RISCVFenceField::R; break;
- case 'w': Imm |= RISCVFenceField::W; break;
+ default:
+ llvm_unreachable("FenceArg must contain only [iorw]");
+ case 'i': Imm |= RISCVFenceField::I; break;
+ case 'o': Imm |= RISCVFenceField::O; break;
+ case 'r': Imm |= RISCVFenceField::R; break;
+ case 'w': Imm |= RISCVFenceField::W; break;
}
}
Inst.addOperand(MCOperand::createImm(Imm));
}
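
As a worked example of the loop above: "fence io, rw" carries two fence operands, each folded into a 4-bit field with one bit per ordered access class. Below is a minimal standalone sketch, assuming the RISCVFenceField bit values from Utils/RISCVBaseInfo.h (I = 8, O = 4, R = 2, W = 1), which this diff references but does not show; encodeFenceArg is a hypothetical name used only for illustration.

    // Standalone re-statement of the fence-field encoding performed by
    // addFenceArgOperands above. Bit values assumed from Utils/RISCVBaseInfo.h.
    #include <cassert>
    #include <string>

    namespace RISCVFenceField {
    enum FenceField { I = 8, O = 4, R = 2, W = 1 };
    } // namespace RISCVFenceField

    static unsigned encodeFenceArg(const std::string &Arg) {
      unsigned Imm = 0;
      for (char C : Arg) {
        switch (C) {
        case 'i': Imm |= RISCVFenceField::I; break;
        case 'o': Imm |= RISCVFenceField::O; break;
        case 'r': Imm |= RISCVFenceField::R; break;
        case 'w': Imm |= RISCVFenceField::W; break;
        }
      }
      return Imm;
    }

    int main() {
      // `fence io, rw`: predecessor "io" -> 0b1100, successor "rw" -> 0b0011.
      assert(encodeFenceArg("io") == 12);
      assert(encodeFenceArg("rw") == 3);
      assert(encodeFenceArg("iorw") == 15);
      return 0;
    }
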
+ void addCSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createImm(SysReg.Encoding));
+ }
+
// Returns the rounding mode represented by this RISCVOperand. Should only
// be called after checking isFRMArg.
RISCVFPRndMode::RoundingMode getRoundingMode() const {
@@ -590,40 +674,40 @@ public:
// information from TableGen.
unsigned convertFPR32ToFPR64(unsigned Reg) {
switch (Reg) {
- default:
- llvm_unreachable("Not a recognised FPR32 register");
- case RISCV::F0_32: return RISCV::F0_64;
- case RISCV::F1_32: return RISCV::F1_64;
- case RISCV::F2_32: return RISCV::F2_64;
- case RISCV::F3_32: return RISCV::F3_64;
- case RISCV::F4_32: return RISCV::F4_64;
- case RISCV::F5_32: return RISCV::F5_64;
- case RISCV::F6_32: return RISCV::F6_64;
- case RISCV::F7_32: return RISCV::F7_64;
- case RISCV::F8_32: return RISCV::F8_64;
- case RISCV::F9_32: return RISCV::F9_64;
- case RISCV::F10_32: return RISCV::F10_64;
- case RISCV::F11_32: return RISCV::F11_64;
- case RISCV::F12_32: return RISCV::F12_64;
- case RISCV::F13_32: return RISCV::F13_64;
- case RISCV::F14_32: return RISCV::F14_64;
- case RISCV::F15_32: return RISCV::F15_64;
- case RISCV::F16_32: return RISCV::F16_64;
- case RISCV::F17_32: return RISCV::F17_64;
- case RISCV::F18_32: return RISCV::F18_64;
- case RISCV::F19_32: return RISCV::F19_64;
- case RISCV::F20_32: return RISCV::F20_64;
- case RISCV::F21_32: return RISCV::F21_64;
- case RISCV::F22_32: return RISCV::F22_64;
- case RISCV::F23_32: return RISCV::F23_64;
- case RISCV::F24_32: return RISCV::F24_64;
- case RISCV::F25_32: return RISCV::F25_64;
- case RISCV::F26_32: return RISCV::F26_64;
- case RISCV::F27_32: return RISCV::F27_64;
- case RISCV::F28_32: return RISCV::F28_64;
- case RISCV::F29_32: return RISCV::F29_64;
- case RISCV::F30_32: return RISCV::F30_64;
- case RISCV::F31_32: return RISCV::F31_64;
+ default:
+ llvm_unreachable("Not a recognised FPR32 register");
+ case RISCV::F0_32: return RISCV::F0_64;
+ case RISCV::F1_32: return RISCV::F1_64;
+ case RISCV::F2_32: return RISCV::F2_64;
+ case RISCV::F3_32: return RISCV::F3_64;
+ case RISCV::F4_32: return RISCV::F4_64;
+ case RISCV::F5_32: return RISCV::F5_64;
+ case RISCV::F6_32: return RISCV::F6_64;
+ case RISCV::F7_32: return RISCV::F7_64;
+ case RISCV::F8_32: return RISCV::F8_64;
+ case RISCV::F9_32: return RISCV::F9_64;
+ case RISCV::F10_32: return RISCV::F10_64;
+ case RISCV::F11_32: return RISCV::F11_64;
+ case RISCV::F12_32: return RISCV::F12_64;
+ case RISCV::F13_32: return RISCV::F13_64;
+ case RISCV::F14_32: return RISCV::F14_64;
+ case RISCV::F15_32: return RISCV::F15_64;
+ case RISCV::F16_32: return RISCV::F16_64;
+ case RISCV::F17_32: return RISCV::F17_64;
+ case RISCV::F18_32: return RISCV::F18_64;
+ case RISCV::F19_32: return RISCV::F19_64;
+ case RISCV::F20_32: return RISCV::F20_64;
+ case RISCV::F21_32: return RISCV::F21_64;
+ case RISCV::F22_32: return RISCV::F22_64;
+ case RISCV::F23_32: return RISCV::F23_64;
+ case RISCV::F24_32: return RISCV::F24_64;
+ case RISCV::F25_32: return RISCV::F25_64;
+ case RISCV::F26_32: return RISCV::F26_64;
+ case RISCV::F27_32: return RISCV::F27_64;
+ case RISCV::F28_32: return RISCV::F28_64;
+ case RISCV::F29_32: return RISCV::F29_64;
+ case RISCV::F30_32: return RISCV::F30_64;
+ case RISCV::F31_32: return RISCV::F31_64;
}
}
@@ -663,7 +747,9 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
bool MatchingInlineAsm) {
MCInst Inst;
- switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
+ auto Result =
+ MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
+ switch (Result) {
default:
break;
case Match_Success:
@@ -684,7 +770,21 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
}
return Error(ErrorLoc, "invalid operand for instruction");
}
- case Match_InvalidImmXLen:
+ }
+
+ // Handle the case when the error message is of a specific type
+ // other than the generic Match_InvalidOperand, and the
+ // corresponding operand is missing.
+ if (Result > FIRST_TARGET_MATCH_RESULT_TY) {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
+ return Error(ErrorLoc, "too few operands for instruction");
+ }
+
+ switch (Result) {
+ default:
+ break;
+ case Match_InvalidImmXLenLI:
if (isRV64()) {
SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
return Error(ErrorLoc, "operand must be a constant 64-bit integer");
@@ -706,8 +806,8 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 5),
(1 << 5) - 1);
case Match_InvalidSImm6NonZero:
- return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 5),
- (1 << 5) - 1,
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 5), (1 << 5) - 1,
"immediate must be non-zero in the range");
case Match_InvalidCLUIImm:
return generateImmOutOfRangeError(
@@ -742,24 +842,36 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
Operands, ErrorInfo, -(1 << 9), (1 << 9) - 16,
"immediate must be a multiple of 16 bytes and non-zero in the range");
case Match_InvalidSImm12:
- return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 11),
- (1 << 11) - 1);
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, -(1 << 11), (1 << 11) - 1,
+ "operand must be a symbol with %lo/%pcrel_lo modifier or an integer in "
+ "the range");
case Match_InvalidSImm12Lsb0:
return generateImmOutOfRangeError(
Operands, ErrorInfo, -(1 << 11), (1 << 11) - 2,
"immediate must be a multiple of 2 bytes in the range");
- case Match_InvalidUImm12:
- return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 12) - 1);
case Match_InvalidSImm13Lsb0:
return generateImmOutOfRangeError(
Operands, ErrorInfo, -(1 << 12), (1 << 12) - 2,
"immediate must be a multiple of 2 bytes in the range");
- case Match_InvalidUImm20:
- return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 20) - 1);
- case Match_InvalidSImm21Lsb0:
+ case Match_InvalidUImm20LUI:
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 20) - 1,
+ "operand must be a symbol with %hi() "
+ "modifier or an integer in the range");
+ case Match_InvalidUImm20AUIPC:
+ return generateImmOutOfRangeError(
+ Operands, ErrorInfo, 0, (1 << 20) - 1,
+ "operand must be a symbol with %pcrel_hi() modifier or an integer in "
+ "the range");
+ case Match_InvalidSImm21Lsb0JAL:
return generateImmOutOfRangeError(
Operands, ErrorInfo, -(1 << 20), (1 << 20) - 2,
"immediate must be a multiple of 2 bytes in the range");
+ case Match_InvalidCSRSystemRegister: {
+ return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 12) - 1,
+ "operand must be a valid system register "
+ "name or an integer in the range");
+ }
case Match_InvalidFenceArg: {
SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
return Error(
@@ -842,9 +954,9 @@ OperandMatchResultTy RISCVAsmParser::parseRegister(OperandVector &Operands,
return MatchOperand_Success;
}
-OperandMatchResultTy RISCVAsmParser::parseImmediate(OperandVector &Operands) {
+OperandMatchResultTy
+RISCVAsmParser::parseCSRSystemRegister(OperandVector &Operands) {
SMLoc S = getLoc();
- SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
const MCExpr *Res;
switch (getLexer().getKind()) {
@@ -854,18 +966,77 @@ OperandMatchResultTy RISCVAsmParser::parseImmediate(OperandVector &Operands) {
case AsmToken::Minus:
case AsmToken::Plus:
case AsmToken::Integer:
- case AsmToken::String:
+ case AsmToken::String: {
if (getParser().parseExpression(Res))
return MatchOperand_ParseFail;
- break;
+
+ auto *CE = dyn_cast<MCConstantExpr>(Res);
+ if (CE) {
+ int64_t Imm = CE->getValue();
+ if (isUInt<12>(Imm)) {
+ auto SysReg = RISCVSysReg::lookupSysRegByEncoding(Imm);
+ // Accept an immediate representing a named or unnamed Sys Reg
+ // if the range is valid, regardless of the required features.
+ Operands.push_back(RISCVOperand::createSysReg(
+ SysReg ? SysReg->Name : "", S, Imm, isRV64()));
+ return MatchOperand_Success;
+ }
+ }
+
+ Twine Msg = "immediate must be an integer in the range";
+ Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
+ return MatchOperand_ParseFail;
+ }
case AsmToken::Identifier: {
StringRef Identifier;
if (getParser().parseIdentifier(Identifier))
return MatchOperand_ParseFail;
- MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
- Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
- break;
+
+ auto SysReg = RISCVSysReg::lookupSysRegByName(Identifier);
+ // Accept a named Sys Reg if the required features are present.
+ if (SysReg) {
+ if (!SysReg->haveRequiredFeatures(getSTI().getFeatureBits())) {
+ Error(S, "system register use requires an option to be enabled");
+ return MatchOperand_ParseFail;
+ }
+ Operands.push_back(RISCVOperand::createSysReg(
+ Identifier, S, SysReg->Encoding, isRV64()));
+ return MatchOperand_Success;
+ }
+
+ Twine Msg = "operand must be a valid system register name "
+ "or an integer in the range";
+ Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
+ return MatchOperand_ParseFail;
+ }
+ case AsmToken::Percent: {
+ // Discard operand with modifier.
+ Twine Msg = "immediate must be an integer in the range";
+ Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
+ return MatchOperand_ParseFail;
+ }
}
+
+ return MatchOperand_NoMatch;
+}
+
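
The RISCVSysReg::lookupSysRegByName and lookupSysRegByEncoding calls above (and in the instruction printer later in this patch) consult a table generated from RISCVSystemOperands.td by the new -gen-searchable-tables rule; neither the .td file (352 lines in the diffstat) nor the generated record appears in this excerpt. The following is a simplified standalone sketch of the record shape those call sites imply; the field names and the std::bitset stand-in for llvm::FeatureBitset are assumptions.

    #include <bitset>
    #include <cstdint>

    // Simplified stand-in for llvm::FeatureBitset.
    using FeatureBits = std::bitset<64>;

    // Assumed shape of the searchable-table record behind lookupSysRegByName
    // and lookupSysRegByEncoding.
    struct SysReg {
      const char *Name;
      uint16_t Encoding;            // 12-bit CSR address, e.g. 0x300 for mstatus.
      FeatureBits FeaturesRequired; // Extension features the CSR depends on.
      bool IsRV32Only;              // High-half CSRs such as cycleh are RV32-only.

      bool haveRequiredFeatures(const FeatureBits &Active, bool IsRV64) const {
        // RV32-only CSRs must be rejected on RV64 targets.
        if (IsRV32Only && IsRV64)
          return false;
        // Otherwise every required feature bit must be enabled.
        return (FeaturesRequired & Active) == FeaturesRequired;
      }
    };

This is why parseCSRSystemRegister can accept any in-range integer encoding while rejecting a named CSR whose required features are missing: the name path consults haveRequiredFeatures, the integer path deliberately does not.
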
+OperandMatchResultTy RISCVAsmParser::parseImmediate(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+ const MCExpr *Res;
+
+ switch (getLexer().getKind()) {
+ default:
+ return MatchOperand_NoMatch;
+ case AsmToken::LParen:
+ case AsmToken::Minus:
+ case AsmToken::Plus:
+ case AsmToken::Integer:
+ case AsmToken::String:
+ case AsmToken::Identifier:
+ if (getParser().parseExpression(Res))
+ return MatchOperand_ParseFail;
+ break;
case AsmToken::Percent:
return parseOperandWithModifier(Operands);
}
@@ -914,6 +1085,41 @@ RISCVAsmParser::parseOperandWithModifier(OperandVector &Operands) {
return MatchOperand_Success;
}
+OperandMatchResultTy RISCVAsmParser::parseBareSymbol(OperandVector &Operands) {
+ SMLoc S = getLoc();
+ SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
+ const MCExpr *Res;
+
+ if (getLexer().getKind() != AsmToken::Identifier)
+ return MatchOperand_NoMatch;
+
+ StringRef Identifier;
+ if (getParser().parseIdentifier(Identifier))
+ return MatchOperand_ParseFail;
+
+ MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
+ Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
+ Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy RISCVAsmParser::parseJALOffset(OperandVector &Operands) {
+ // Parsing jal operands is fiddly due to the `jal foo` and `jal ra, foo`
+ // both being acceptable forms. When parsing `jal ra, foo` this function
+ // will be called for the `ra` register operand in an attempt to match the
+ // single-operand alias. parseJALOffset must fail for this case. It would
+ // seem logical to try to parse the operand using parseImmediate and return
+ // NoMatch if the next token is a comma (meaning we must be parsing a jal in
+ // the second form rather than the first). We can't do this as there's no
+ // way of rewinding the lexer state. Instead, return NoMatch if this operand
+ // is an identifier and is followed by a comma.
+ if (getLexer().is(AsmToken::Identifier) &&
+ getLexer().peekTok().is(AsmToken::Comma))
+ return MatchOperand_NoMatch;
+
+ return parseImmediate(Operands);
+}
+
OperandMatchResultTy
RISCVAsmParser::parseMemOpBaseReg(OperandVector &Operands) {
if (getLexer().isNot(AsmToken::LParen)) {
@@ -942,13 +1148,19 @@ RISCVAsmParser::parseMemOpBaseReg(OperandVector &Operands) {
/// Looks at a token type and creates the relevant operand from this
/// information, adding to Operands. If operand was parsed, returns false, else
-/// true. If ForceImmediate is true, no attempt will be made to parse the
-/// operand as a register, which is needed for pseudoinstructions such as
-/// call.
-bool RISCVAsmParser::parseOperand(OperandVector &Operands,
- bool ForceImmediate) {
- // Attempt to parse token as register, unless ForceImmediate.
- if (!ForceImmediate && parseRegister(Operands, true) == MatchOperand_Success)
+/// true.
+bool RISCVAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
+ // Check if the current operand has a custom associated parser, if so, try to
+ // custom parse the operand, or fallback to the general approach.
+ OperandMatchResultTy Result =
+ MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
+ if (Result == MatchOperand_Success)
+ return false;
+ if (Result == MatchOperand_ParseFail)
+ return true;
+
+ // Attempt to parse token as a register.
+ if (parseRegister(Operands, true) == MatchOperand_Success)
return false;
// Attempt to parse token as an immediate
@@ -967,6 +1179,21 @@ bool RISCVAsmParser::parseOperand(OperandVector &Operands,
bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
StringRef Name, SMLoc NameLoc,
OperandVector &Operands) {
+ // Ensure that if an instruction is parsed while relaxation is enabled,
+ // relocations are forced for the file. Ideally this would be done when there
+ // is enough information to reliably determine if the instruction itself may
+ // cause relaxations. Unfortunately, the instruction processing stage occurs in the
+ // same pass as relocation emission, so it's too late to set a 'sticky bit'
+ // for the entire file.
+ if (getSTI().getFeatureBits()[RISCV::FeatureRelax]) {
+ auto *Assembler = getTargetStreamer().getStreamer().getAssemblerPtr();
+ if (Assembler != nullptr) {
+ RISCVAsmBackend &MAB =
+ static_cast<RISCVAsmBackend &>(Assembler->getBackend());
+ MAB.setForceRelocs();
+ }
+ }
+
// First operand is token for instruction
Operands.push_back(RISCVOperand::createToken(Name, NameLoc, isRV64()));
@@ -975,18 +1202,20 @@ bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
return false;
// Parse first operand
- bool ForceImmediate = (Name == "call" || Name == "tail");
- if (parseOperand(Operands, ForceImmediate))
+ if (parseOperand(Operands, Name))
return true;
// Parse until end of statement, consuming commas between operands
+ unsigned OperandIdx = 1;
while (getLexer().is(AsmToken::Comma)) {
// Consume comma token
getLexer().Lex();
// Parse next operand
- if (parseOperand(Operands, false))
+ if (parseOperand(Operands, Name))
return true;
+
+ ++OperandIdx;
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
@@ -1068,6 +1297,33 @@ bool RISCVAsmParser::parseDirectiveOption() {
StringRef Option = Tok.getIdentifier();
+ if (Option == "push") {
+ getTargetStreamer().emitDirectiveOptionPush();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ pushFeatureBits();
+ return false;
+ }
+
+ if (Option == "pop") {
+ SMLoc StartLoc = Parser.getTok().getLoc();
+ getTargetStreamer().emitDirectiveOptionPop();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ if (popFeatureBits())
+ return Error(StartLoc, ".option pop with no .option push");
+
+ return false;
+ }
+
if (Option == "rvc") {
getTargetStreamer().emitDirectiveOptionRVC();
@@ -1092,9 +1348,34 @@ bool RISCVAsmParser::parseDirectiveOption() {
return false;
}
+ if (Option == "relax") {
+ getTargetStreamer().emitDirectiveOptionRelax();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ setFeatureBits(RISCV::FeatureRelax, "relax");
+ return false;
+ }
+
+ if (Option == "norelax") {
+ getTargetStreamer().emitDirectiveOptionNoRelax();
+
+ Parser.Lex();
+ if (Parser.getTok().isNot(AsmToken::EndOfStatement))
+ return Error(Parser.getTok().getLoc(),
+ "unexpected token, expected end of statement");
+
+ clearFeatureBits(RISCV::FeatureRelax, "relax");
+ return false;
+ }
+
// Unknown option.
Warning(Parser.getTok().getLoc(),
- "unknown option, expected 'rvc' or 'norvc'");
+ "unknown option, expected 'push', 'pop', 'rvc', 'norvc', 'relax' or "
+ "'norelax'");
Parser.eatToEndOfStatement();
return false;
}
@@ -1108,80 +1389,54 @@ void RISCVAsmParser::emitToStreamer(MCStreamer &S, const MCInst &Inst) {
void RISCVAsmParser::emitLoadImm(unsigned DestReg, int64_t Value,
MCStreamer &Out) {
- if (isInt<32>(Value)) {
- // Emits the MC instructions for loading a 32-bit constant into a register.
- //
- // Depending on the active bits in the immediate Value v, the following
- // instruction sequences are emitted:
- //
- // v == 0 : ADDI(W)
- // v[0,12) != 0 && v[12,32) == 0 : ADDI(W)
- // v[0,12) == 0 && v[12,32) != 0 : LUI
- // v[0,32) != 0 : LUI+ADDI(W)
- //
- int64_t Hi20 = ((Value + 0x800) >> 12) & 0xFFFFF;
- int64_t Lo12 = SignExtend64<12>(Value);
- unsigned SrcReg = RISCV::X0;
-
- if (Hi20) {
- emitToStreamer(Out,
- MCInstBuilder(RISCV::LUI).addReg(DestReg).addImm(Hi20));
- SrcReg = DestReg;
+ RISCVMatInt::InstSeq Seq;
+ RISCVMatInt::generateInstSeq(Value, isRV64(), Seq);
+
+ unsigned SrcReg = RISCV::X0;
+ for (RISCVMatInt::Inst &Inst : Seq) {
+ if (Inst.Opc == RISCV::LUI) {
+ emitToStreamer(
+ Out, MCInstBuilder(RISCV::LUI).addReg(DestReg).addImm(Inst.Imm));
+ } else {
+ emitToStreamer(
+ Out, MCInstBuilder(Inst.Opc).addReg(DestReg).addReg(SrcReg).addImm(
+ Inst.Imm));
}
- if (Lo12 || Hi20 == 0) {
- unsigned AddiOpcode =
- STI->hasFeature(RISCV::Feature64Bit) ? RISCV::ADDIW : RISCV::ADDI;
- emitToStreamer(Out, MCInstBuilder(AddiOpcode)
- .addReg(DestReg)
- .addReg(SrcReg)
- .addImm(Lo12));
- }
- return;
- }
- assert(STI->hasFeature(RISCV::Feature64Bit) &&
- "Target must be 64-bit to support a >32-bit constant");
-
- // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
-// (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
- // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
- // while the following ADDI instructions contribute up to 12 bits each.
- //
-// At first glance, implementing this seems to be possible by simply
- // emitting the most significant 32 bits (LUI+ADDIW) followed by as many left
- // shift (SLLI) and immediate additions (ADDI) as needed. However, due to the
- // fact that ADDI performs a sign extended addition, doing it like that would
- // only be possible when at most 11 bits of the ADDI instructions are used.
- // Using all 12 bits of the ADDI instructions, like done by GAS, actually
- // requires that the constant is processed starting with the least significant
- // bit.
- //
- // In the following, constants are processed from LSB to MSB but instruction
- // emission is performed from MSB to LSB by recursively calling
- // emitLoadImm. In each recursion, first the lowest 12 bits are removed
- // from the constant and the optimal shift amount, which can be greater than
- // 12 bits if the constant is sparse, is determined. Then, the shifted
- // remaining constant is processed recursively and gets emitted as soon as it
- // fits into 32 bits. The emission of the shifts and additions is subsequently
- // performed when the recursion returns.
- //
- int64_t Lo12 = SignExtend64<12>(Value);
- int64_t Hi52 = (Value + 0x800) >> 12;
- int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
- Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
-
- emitLoadImm(DestReg, Hi52, Out);
-
- emitToStreamer(Out, MCInstBuilder(RISCV::SLLI)
- .addReg(DestReg)
- .addReg(DestReg)
- .addImm(ShiftAmount));
-
- if (Lo12)
- emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
- .addReg(DestReg)
- .addReg(DestReg)
- .addImm(Lo12));
+ // Only the first instruction has X0 as its source.
+ SrcReg = DestReg;
+ }
+}
+
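
The emitLoadImm body above now defers to Utils/RISCVMatInt.cpp (79 lines in the diffstat), which this excerpt does not include. Here is a sketch of what generateInstSeq plausibly contains, reconstructed from the call site and from the algorithm description in the comment block removed from emitLoadImm above: materialize the low 32 bits with LUI+ADDI(W), and for wider RV64 constants peel 12 bits at a time from the LSB end, recursing so that emission happens MSB-first via SLLI/ADDI steps. Treat names and details as assumptions, not the committed source.

    // Hypothetical reconstruction of Utils/RISCVMatInt.{h,cpp}.
    #include "MCTargetDesc/RISCVMCTargetDesc.h" // assumed, for RISCV::LUI etc.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    namespace llvm {
    namespace RISCVMatInt {
    struct Inst {
      unsigned Opc;
      int64_t Imm;
      Inst(unsigned Opc, int64_t Imm) : Opc(Opc), Imm(Imm) {}
    };
    using InstSeq = SmallVector<Inst, 8>;

    void generateInstSeq(int64_t Val, bool IsRV64, InstSeq &Res) {
      if (isInt<32>(Val)) {
        // LUI materializes the high 20 bits; the +0x800 rounding compensates
        // for the sign extension performed by the trailing ADDI(W).
        int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
        int64_t Lo12 = SignExtend64<12>(Val);
        if (Hi20)
          Res.push_back(Inst(RISCV::LUI, Hi20));
        if (Lo12 || Hi20 == 0) {
          unsigned AddiOpc = (IsRV64 && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
          Res.push_back(Inst(AddiOpc, Lo12));
        }
        return;
      }

      assert(IsRV64 && "Target must be 64-bit to materialize a >32-bit constant");

      // Strip the low 12 bits, skip past any run of zeros (so sparse constants
      // get one larger shift instead of several), then recurse on the rest.
      int64_t Lo12 = SignExtend64<12>(Val);
      int64_t Hi52 = (Val + 0x800) >> 12;
      int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
      Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);

      generateInstSeq(Hi52, IsRV64, Res);

      Res.push_back(Inst(RISCV::SLLI, ShiftAmount));
      if (Lo12)
        Res.push_back(Inst(RISCV::ADDI, Lo12));
    }
    } // namespace RISCVMatInt
    } // namespace llvm
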
+void RISCVAsmParser::emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc,
+ MCStreamer &Out) {
+ // The load local address pseudo-instruction "lla" is used in PC-relative
+ // addressing of symbols:
+ // lla rdest, symbol
+ // expands to
+ // TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
+ // ADDI rdest, rdest, %pcrel_lo(TmpLabel)
+ MCContext &Ctx = getContext();
+
+ MCSymbol *TmpLabel = Ctx.createTempSymbol(
+ "pcrel_hi", /* AlwaysAddSuffix */ true, /* CanBeUnnamed */ false);
+ Out.EmitLabel(TmpLabel);
+
+ MCOperand DestReg = Inst.getOperand(0);
+ const RISCVMCExpr *Symbol = RISCVMCExpr::create(
+ Inst.getOperand(1).getExpr(), RISCVMCExpr::VK_RISCV_PCREL_HI, Ctx);
+
+ emitToStreamer(
+ Out, MCInstBuilder(RISCV::AUIPC).addOperand(DestReg).addExpr(Symbol));
+
+ const MCExpr *RefToLinkTmpLabel =
+ RISCVMCExpr::create(MCSymbolRefExpr::create(TmpLabel, Ctx),
+ RISCVMCExpr::VK_RISCV_PCREL_LO, Ctx);
+
+ emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
+ .addOperand(DestReg)
+ .addOperand(DestReg)
+ .addExpr(RefToLinkTmpLabel));
}
bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
@@ -1189,7 +1444,17 @@ bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
Inst.setLoc(IDLoc);
if (Inst.getOpcode() == RISCV::PseudoLI) {
- auto Reg = Inst.getOperand(0).getReg();
+ unsigned Reg = Inst.getOperand(0).getReg();
+ const MCOperand &Op1 = Inst.getOperand(1);
+ if (Op1.isExpr()) {
+ // We must have li reg, %lo(sym) or li reg, %pcrel_lo(sym) or similar.
+ // Just convert to an addi. This allows compatibility with gas.
+ emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
+ .addReg(Reg)
+ .addReg(RISCV::X0)
+ .addExpr(Op1.getExpr()));
+ return false;
+ }
int64_t Imm = Inst.getOperand(1).getImm();
// On RV32 the immediate here can either be a signed or an unsigned
// 32-bit number. Sign extension has to be performed to ensure that Imm
@@ -1198,6 +1463,9 @@ bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
Imm = SignExtend64<32>(Imm);
emitLoadImm(Reg, Imm, Out);
return false;
+ } else if (Inst.getOpcode() == RISCV::PseudoLLA) {
+ emitLoadLocalAddress(Inst, IDLoc, Out);
+ return false;
}
emitToStreamer(Out, Inst);
diff --git a/lib/Target/RISCV/CMakeLists.txt b/lib/Target/RISCV/CMakeLists.txt
index f8d4e2b9517d..07c32cb315d0 100644
--- a/lib/Target/RISCV/CMakeLists.txt
+++ b/lib/Target/RISCV/CMakeLists.txt
@@ -10,11 +10,13 @@ tablegen(LLVM RISCVGenMCCodeEmitter.inc -gen-emitter)
tablegen(LLVM RISCVGenMCPseudoLowering.inc -gen-pseudo-lowering)
tablegen(LLVM RISCVGenRegisterInfo.inc -gen-register-info)
tablegen(LLVM RISCVGenSubtargetInfo.inc -gen-subtarget)
+tablegen(LLVM RISCVGenSystemOperands.inc -gen-searchable-tables)
add_public_tablegen_target(RISCVCommonTableGen)
add_llvm_target(RISCVCodeGen
RISCVAsmPrinter.cpp
+ RISCVExpandPseudoInsts.cpp
RISCVFrameLowering.cpp
RISCVInstrInfo.cpp
RISCVISelDAGToDAG.cpp
@@ -32,3 +34,4 @@ add_subdirectory(Disassembler)
add_subdirectory(InstPrinter)
add_subdirectory(MCTargetDesc)
add_subdirectory(TargetInfo)
+add_subdirectory(Utils)
diff --git a/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 7bbb371a757f..eafa09d56315 100644
--- a/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "Utils/RISCVBaseInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
@@ -211,6 +212,15 @@ static DecodeStatus decodeUImmOperand(MCInst &Inst, uint64_t Imm,
}
template <unsigned N>
+static DecodeStatus decodeUImmNonZeroOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ if (Imm == 0)
+ return MCDisassembler::Fail;
+ return decodeUImmOperand<N>(Inst, Imm, Address, Decoder);
+}
+
+template <unsigned N>
static DecodeStatus decodeSImmOperand(MCInst &Inst, uint64_t Imm,
int64_t Address, const void *Decoder) {
assert(isUInt<N>(Imm) && "Invalid immediate");
@@ -221,6 +231,15 @@ static DecodeStatus decodeSImmOperand(MCInst &Inst, uint64_t Imm,
}
template <unsigned N>
+static DecodeStatus decodeSImmNonZeroOperand(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ if (Imm == 0)
+ return MCDisassembler::Fail;
+ return decodeSImmOperand<N>(Inst, Imm, Address, Decoder);
+}
+
+template <unsigned N>
static DecodeStatus decodeSImmOperandAndLsl1(MCInst &Inst, uint64_t Imm,
int64_t Address,
const void *Decoder) {
@@ -243,6 +262,17 @@ static DecodeStatus decodeCLUIImmOperand(MCInst &Inst, uint64_t Imm,
return MCDisassembler::Success;
}
+static DecodeStatus decodeFRMArg(MCInst &Inst, uint64_t Imm,
+ int64_t Address,
+ const void *Decoder) {
+ assert(isUInt<3>(Imm) && "Invalid immediate");
+ if (!llvm::RISCVFPRndMode::isValidRoundingMode(Imm))
+ return MCDisassembler::Fail;
+
+ Inst.addOperand(MCOperand::createImm(Imm));
+ return MCDisassembler::Success;
+}
+
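
decodeFRMArg, like the parser's isFRMArg handling, leans on RISCVFPRndMode helpers that moved into Utils/RISCVBaseInfo.h and are only referenced here. A sketch of the expected definitions follows, assuming the standard RISC-V F-extension frm encodings; values 5 and 6 are reserved, which is exactly what the decoder above rejects.

    // Assumed shape of the rounding-mode helpers in Utils/RISCVBaseInfo.h.
    namespace RISCVFPRndMode {
    enum RoundingMode {
      RNE = 0, // Round to nearest, ties to even
      RTZ = 1, // Round towards zero
      RDN = 2, // Round down (towards -infinity)
      RUP = 3, // Round up (towards +infinity)
      RMM = 4, // Round to nearest, ties to max magnitude
      DYN = 7, // Dynamic: read the rounding mode from the frm CSR
      Invalid  // Sentinel for stringToRoundingMode failures
    };

    inline bool isValidRoundingMode(unsigned Mode) {
      switch (Mode) {
      default:
        return false;
      case RNE:
      case RTZ:
      case RDN:
      case RUP:
      case RMM:
      case DYN:
        return true;
      }
    }
    } // namespace RISCVFPRndMode
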
#include "RISCVGenDisassemblerTables.inc"
DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
@@ -257,11 +287,19 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
// It's a 32-bit instruction if bits 0 and 1 are both 1.
if ((Bytes[0] & 0x3) == 0x3) {
+ if (Bytes.size() < 4) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
Insn = support::endian::read32le(Bytes.data());
LLVM_DEBUG(dbgs() << "Trying RISCV32 table :\n");
Result = decodeInstruction(DecoderTable32, MI, Insn, Address, this, STI);
Size = 4;
} else {
+ if (Bytes.size() < 2) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
Insn = support::endian::read16le(Bytes.data());
if (!STI.getFeatureBits()[RISCV::Feature64Bit]) {
diff --git a/lib/Target/RISCV/InstPrinter/LLVMBuild.txt b/lib/Target/RISCV/InstPrinter/LLVMBuild.txt
index 5f4545e3d676..5a28372a302e 100644
--- a/lib/Target/RISCV/InstPrinter/LLVMBuild.txt
+++ b/lib/Target/RISCV/InstPrinter/LLVMBuild.txt
@@ -19,5 +19,5 @@
type = Library
name = RISCVAsmPrinter
parent = RISCV
-required_libraries = MC Support
+required_libraries = MC RISCVUtils Support
add_to_library_groups = RISCV
diff --git a/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp b/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
index 300e6fd9750a..979c8f4e2fa7 100644
--- a/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
+++ b/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "RISCVInstPrinter.h"
-#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVMCExpr.h"
+#include "Utils/RISCVBaseInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
@@ -36,10 +36,9 @@ using namespace llvm;
#include "RISCVGenCompressInstEmitter.inc"
static cl::opt<bool>
-NoAliases("riscv-no-aliases",
- cl::desc("Disable the emission of assembler pseudo instructions"),
- cl::init(false),
- cl::Hidden);
+ NoAliases("riscv-no-aliases",
+ cl::desc("Disable the emission of assembler pseudo instructions"),
+ cl::init(false), cl::Hidden);
void RISCVInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot, const MCSubtargetInfo &STI) {
@@ -49,7 +48,7 @@ void RISCVInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
if (!NoAliases)
Res = uncompressInst(UncompressedMI, *MI, MRI, STI);
if (Res)
- NewMI = const_cast<MCInst*>(&UncompressedMI);
+ NewMI = const_cast<MCInst *>(&UncompressedMI);
if (NoAliases || !printAliasInstr(NewMI, STI, O))
printInstruction(NewMI, STI, O);
printAnnotation(O, Annot);
@@ -60,8 +59,8 @@ void RISCVInstPrinter::printRegName(raw_ostream &O, unsigned RegNo) const {
}
void RISCVInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
- const MCSubtargetInfo &STI,
- raw_ostream &O, const char *Modifier) {
+ const MCSubtargetInfo &STI, raw_ostream &O,
+ const char *Modifier) {
assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
const MCOperand &MO = MI->getOperand(OpNo);
@@ -79,10 +78,23 @@ void RISCVInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
MO.getExpr()->print(O, &MAI);
}
+void RISCVInstPrinter::printCSRSystemRegister(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ unsigned Imm = MI->getOperand(OpNo).getImm();
+ auto SysReg = RISCVSysReg::lookupSysRegByEncoding(Imm);
+ if (SysReg && SysReg->haveRequiredFeatures(STI.getFeatureBits()))
+ O << SysReg->Name;
+ else
+ O << Imm;
+}
+
void RISCVInstPrinter::printFenceArg(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI,
raw_ostream &O) {
unsigned FenceArg = MI->getOperand(OpNo).getImm();
+ assert(((FenceArg >> 4) == 0) && "Invalid immediate in printFenceArg");
+
if ((FenceArg & RISCVFenceField::I) != 0)
O << 'i';
if ((FenceArg & RISCVFenceField::O) != 0)
@@ -91,11 +103,12 @@ void RISCVInstPrinter::printFenceArg(const MCInst *MI, unsigned OpNo,
O << 'r';
if ((FenceArg & RISCVFenceField::W) != 0)
O << 'w';
+ if (FenceArg == 0)
+ O << "unknown";
}
void RISCVInstPrinter::printFRMArg(const MCInst *MI, unsigned OpNo,
- const MCSubtargetInfo &STI,
- raw_ostream &O) {
+ const MCSubtargetInfo &STI, raw_ostream &O) {
auto FRMArg =
static_cast<RISCVFPRndMode::RoundingMode>(MI->getOperand(OpNo).getImm());
O << RISCVFPRndMode::roundingModeToString(FRMArg);
diff --git a/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.h b/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.h
index 241be8daf113..0f9bed184996 100644
--- a/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.h
+++ b/lib/Target/RISCV/InstPrinter/RISCVInstPrinter.h
@@ -32,6 +32,8 @@ public:
void printOperand(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
raw_ostream &O, const char *Modifier = nullptr);
+ void printCSRSystemRegister(const MCInst *MI, unsigned OpNo,
+ const MCSubtargetInfo &STI, raw_ostream &O);
void printFenceArg(const MCInst *MI, unsigned OpNo,
const MCSubtargetInfo &STI, raw_ostream &O);
void printFRMArg(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
@@ -48,6 +50,6 @@ public:
static const char *getRegisterName(unsigned RegNo,
unsigned AltIdx = RISCV::ABIRegAltName);
};
-}
+} // namespace llvm
#endif
diff --git a/lib/Target/RISCV/LLVMBuild.txt b/lib/Target/RISCV/LLVMBuild.txt
index ab21565b0c2e..7af605e6954d 100644
--- a/lib/Target/RISCV/LLVMBuild.txt
+++ b/lib/Target/RISCV/LLVMBuild.txt
@@ -16,7 +16,7 @@
;===------------------------------------------------------------------------===;
[common]
-subdirectories = AsmParser Disassembler InstPrinter TargetInfo MCTargetDesc
+subdirectories = AsmParser Disassembler InstPrinter TargetInfo MCTargetDesc Utils
[component_0]
type = TargetGroup
@@ -31,5 +31,5 @@ type = Library
name = RISCVCodeGen
parent = RISCV
required_libraries = AsmPrinter Core CodeGen MC RISCVAsmPrinter RISCVDesc
- RISCVInfo SelectionDAG Support Target
+ RISCVInfo RISCVUtils SelectionDAG Support Target
add_to_library_groups = RISCV
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 9ba7ebd0eb0f..7672fea5d95b 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -7,114 +7,58 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/RISCVFixupKinds.h"
-#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "RISCVAsmBackend.h"
+#include "RISCVMCExpr.h"
#include "llvm/ADT/APInt.h"
-#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
-#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-namespace {
-class RISCVAsmBackend : public MCAsmBackend {
- const MCSubtargetInfo &STI;
- uint8_t OSABI;
- bool Is64Bit;
+// If linker relaxation is enabled, or the relax option had previously been
+// enabled, always emit relocations even if the fixup can be resolved. This is
+// necessary for correctness as offsets may change during relaxation.
+bool RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue &Target) {
+ bool ShouldForce = false;
-public:
- RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit)
- : MCAsmBackend(support::little), STI(STI), OSABI(OSABI),
- Is64Bit(Is64Bit) {}
- ~RISCVAsmBackend() override {}
-
- // Generate diff expression relocations if the relax feature is enabled,
- // otherwise it is safe for the assembler to calculate these internally.
- bool requiresDiffExpressionRelocations() const override {
- return STI.getFeatureBits()[RISCV::FeatureRelax];
- }
- void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
- const MCValue &Target, MutableArrayRef<char> Data,
- uint64_t Value, bool IsResolved,
- const MCSubtargetInfo *STI) const override;
-
- std::unique_ptr<MCObjectTargetWriter>
- createObjectTargetWriter() const override;
-
- // If linker relaxation is enabled, always emit relocations even if the fixup
- // can be resolved. This is necessary for correctness as offsets may change
- // during relaxation.
- bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
- const MCValue &Target) override {
- return STI.getFeatureBits()[RISCV::FeatureRelax];
- }
-
- bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout) const override {
- llvm_unreachable("Handled by fixupNeedsRelaxationAdvanced");
- }
-
- bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
- uint64_t Value,
- const MCRelaxableFragment *DF,
- const MCAsmLayout &Layout,
- const bool WasForced) const override;
-
- unsigned getNumFixupKinds() const override {
- return RISCV::NumTargetFixupKinds;
- }
-
- const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
- const static MCFixupKindInfo Infos[] = {
- // This table *must* be in the order that the fixup_* kinds are defined in
- // RISCVFixupKinds.h.
- //
- // name offset bits flags
- { "fixup_riscv_hi20", 12, 20, 0 },
- { "fixup_riscv_lo12_i", 20, 12, 0 },
- { "fixup_riscv_lo12_s", 0, 32, 0 },
- { "fixup_riscv_pcrel_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_pcrel_lo12_i", 20, 12, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_pcrel_lo12_s", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_jal", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_rvc_jump", 2, 11, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_rvc_branch", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_call", 0, 64, MCFixupKindInfo::FKF_IsPCRel },
- { "fixup_riscv_relax", 0, 0, 0 }
- };
- static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds,
- "Not all fixup kinds added to Infos array");
-
- if (Kind < FirstTargetFixupKind)
- return MCAsmBackend::getFixupKindInfo(Kind);
-
- assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
- "Invalid kind!");
- return Infos[Kind - FirstTargetFixupKind];
+ switch ((unsigned)Fixup.getKind()) {
+ default:
+ break;
+ case RISCV::fixup_riscv_pcrel_lo12_i:
+ case RISCV::fixup_riscv_pcrel_lo12_s:
+ // For pcrel_lo12, force a relocation if the target of the corresponding
+ // pcrel_hi20 is not in the same fragment.
+ const MCFixup *T = cast<RISCVMCExpr>(Fixup.getValue())->getPCRelHiFixup();
+ if (!T) {
+ Asm.getContext().reportError(Fixup.getLoc(),
+ "could not find corresponding %pcrel_hi");
+ return false;
+ }
+
+ switch ((unsigned)T->getKind()) {
+ default:
+ llvm_unreachable("Unexpected fixup kind for pcrel_lo12");
+ break;
+ case RISCV::fixup_riscv_pcrel_hi20:
+ ShouldForce = T->getValue()->findAssociatedFragment() !=
+ Fixup.getValue()->findAssociatedFragment();
+ break;
+ }
+ break;
}
- bool mayNeedRelaxation(const MCInst &Inst,
- const MCSubtargetInfo &STI) const override;
- unsigned getRelaxedOpcode(unsigned Op) const;
-
- void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
- MCInst &Res) const override;
-
-
- bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
-};
-
+ return ShouldForce || STI.getFeatureBits()[RISCV::FeatureRelax] ||
+ ForceRelocs;
+}
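
The getPCRelHiFixup call above resolves a %pcrel_lo operand back to its partner %pcrel_hi. That helper lands in MCTargetDesc/RISCVMCExpr.cpp (83 changed lines in the diffstat) but is outside this excerpt. The following is a plausible reconstruction, assuming the convention established by emitLoadLocalAddress earlier: the %pcrel_lo expression names the temporary label placed on the auipc, so the matching pcrel_hi20 fixup is the one recorded at that label's offset within the label's data fragment. Details are a reconstruction, not the committed code.

    // Hypothetical reconstruction of RISCVMCExpr::getPCRelHiFixup().
    const MCFixup *RISCVMCExpr::getPCRelHiFixup() const {
      MCValue AUIPCLoc;
      if (!getSubExpr()->evaluateAsRelocatable(AUIPCLoc, nullptr, nullptr))
        return nullptr;

      const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
      if (!AUIPCSRE)
        return nullptr;

      const auto *DF =
          dyn_cast_or_null<MCDataFragment>(AUIPCSRE->findAssociatedFragment());
      if (!DF)
        return nullptr;

      // The auipc carrying the pcrel_hi20 fixup sits at the temp label's
      // offset in this fragment; find the fixup recorded there.
      const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
      for (const MCFixup &F : DF->getFixups()) {
        if (F.getOffset() != AUIPCSymbol->getOffset())
          continue;
        if ((unsigned)F.getKind() == RISCV::fixup_riscv_pcrel_hi20)
          return &F;
      }
      return nullptr;
    }
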
bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
bool Resolved,
@@ -348,8 +292,6 @@ RISCVAsmBackend::createObjectTargetWriter() const {
return createRISCVELFObjectWriter(OSABI, Is64Bit);
}
-} // end anonymous namespace
-
MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
const MCSubtargetInfo &STI,
const MCRegisterInfo &MRI,
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
new file mode 100644
index 000000000000..b98e45f4053f
--- /dev/null
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -0,0 +1,113 @@
+//===-- RISCVAsmBackend.h - RISCV Assembler Backend -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVASMBACKEND_H
+#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVASMBACKEND_H
+
+#include "MCTargetDesc/RISCVFixupKinds.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+
+namespace llvm {
+class MCAssembler;
+class MCObjectTargetWriter;
+class raw_ostream;
+
+class RISCVAsmBackend : public MCAsmBackend {
+ const MCSubtargetInfo &STI;
+ uint8_t OSABI;
+ bool Is64Bit;
+ bool ForceRelocs = false;
+
+public:
+ RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit)
+ : MCAsmBackend(support::little), STI(STI), OSABI(OSABI),
+ Is64Bit(Is64Bit) {}
+ ~RISCVAsmBackend() override {}
+
+ void setForceRelocs() { ForceRelocs = true; }
+
+ // Generate diff expression relocations if the relax feature is enabled or had
+ // previously been enabled, otherwise it is safe for the assembler to
+ // calculate these internally.
+ bool requiresDiffExpressionRelocations() const override {
+ return STI.getFeatureBits()[RISCV::FeatureRelax] || ForceRelocs;
+ }
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsResolved,
+ const MCSubtargetInfo *STI) const override;
+
+ std::unique_ptr<MCObjectTargetWriter>
+ createObjectTargetWriter() const override;
+
+ bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target) override;
+
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ llvm_unreachable("Handled by fixupNeedsRelaxationAdvanced");
+ }
+
+ bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout,
+ const bool WasForced) const override;
+
+ unsigned getNumFixupKinds() const override {
+ return RISCV::NumTargetFixupKinds;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
+ const static MCFixupKindInfo Infos[] = {
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // RISCVFixupKinds.h.
+ //
+ // name offset bits flags
+ { "fixup_riscv_hi20", 12, 20, 0 },
+ { "fixup_riscv_lo12_i", 20, 12, 0 },
+ { "fixup_riscv_lo12_s", 0, 32, 0 },
+ { "fixup_riscv_pcrel_hi20", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_pcrel_lo12_i", 20, 12, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_pcrel_lo12_s", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_jal", 12, 20, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_rvc_jump", 2, 11, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_rvc_branch", 0, 16, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_call", 0, 64, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_riscv_relax", 0, 0, 0 }
+ };
+ static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds,
+ "Not all fixup kinds added to Infos array");
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+ }
+
+ bool mayNeedRelaxation(const MCInst &Inst,
+ const MCSubtargetInfo &STI) const override;
+ unsigned getRelaxedOpcode(unsigned Op) const;
+
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
+
+ bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
+};
+}
+
+#endif
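Editor's note: the offset/bits pairs in the getFixupKindInfo table above tell the generic fixup machinery where each immediate lives inside the instruction encoding. A minimal sketch of the idea with hypothetical names, not the LLVM API (the real applyFixup additionally rearranges the value for the B/J-type immediates before placing it):

    #include <cstdint>

    // Position a resolved fixup value inside a 32-bit encoding using an
    // (Offset, Bits) pair like the ones in the table above.
    uint32_t placeFixupValue(uint32_t Inst, uint64_t Value, unsigned Offset,
                             unsigned Bits) {
      uint64_t Mask = (Bits >= 64) ? ~0ULL : ((1ULL << Bits) - 1);
      return Inst | uint32_t((Value & Mask) << Offset);
    }

    // E.g. fixup_riscv_hi20 (offset 12, bits 20) lands the immediate in
    // bits [12,32) of a LUI encoding.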
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
index 6428b11cfe9c..a6ba1e41e964 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.cpp
@@ -38,5 +38,9 @@ MCELFStreamer &RISCVTargetELFStreamer::getStreamer() {
return static_cast<MCELFStreamer &>(Streamer);
}
+void RISCVTargetELFStreamer::emitDirectiveOptionPush() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionPop() {}
void RISCVTargetELFStreamer::emitDirectiveOptionRVC() {}
void RISCVTargetELFStreamer::emitDirectiveOptionNoRVC() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionRelax() {}
+void RISCVTargetELFStreamer::emitDirectiveOptionNoRelax() {}
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h b/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
index daa7abfe1336..1f36bbc43882 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVELFStreamer.h
@@ -20,8 +20,12 @@ public:
MCELFStreamer &getStreamer();
RISCVTargetELFStreamer(MCStreamer &S, const MCSubtargetInfo &STI);
+ virtual void emitDirectiveOptionPush();
+ virtual void emitDirectiveOptionPop();
virtual void emitDirectiveOptionRVC();
virtual void emitDirectiveOptionNoRVC();
+ virtual void emitDirectiveOptionRelax();
+ virtual void emitDirectiveOptionNoRelax();
};
}
#endif
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
index 8a796a014b33..c5a4ffc0e360 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVFixupKinds.h"
#include "MCTargetDesc/RISCVMCExpr.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "Utils/RISCVBaseInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
@@ -196,7 +196,7 @@ unsigned RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo,
MCInstrDesc const &Desc = MCII.get(MI.getOpcode());
unsigned MIFrm = Desc.TSFlags & RISCVII::InstFormatMask;
- // If the destination is an immediate, there is nothing to do
+ // If the destination is an immediate, there is nothing to do.
if (MO.isImm())
return MO.getImm();
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
index 085dcd4e5f66..53648a5922c8 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.cpp
@@ -14,12 +14,12 @@
#include "RISCV.h"
#include "RISCVMCExpr.h"
+#include "RISCVFixupKinds.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCValue.h"
-#include "llvm/Object/ELF.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
@@ -41,9 +41,90 @@ void RISCVMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
OS << ')';
}
+const MCFixup *RISCVMCExpr::getPCRelHiFixup() const {
+ MCValue AUIPCLoc;
+ if (!getSubExpr()->evaluateAsRelocatable(AUIPCLoc, nullptr, nullptr))
+ return nullptr;
+
+ const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
+ if (!AUIPCSRE)
+ return nullptr;
+
+ const auto *DF =
+ dyn_cast_or_null<MCDataFragment>(AUIPCSRE->findAssociatedFragment());
+ if (!DF)
+ return nullptr;
+
+ const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
+ for (const MCFixup &F : DF->getFixups()) {
+ if (F.getOffset() != AUIPCSymbol->getOffset())
+ continue;
+
+ switch ((unsigned)F.getKind()) {
+ default:
+ continue;
+ case RISCV::fixup_riscv_pcrel_hi20:
+ return &F;
+ }
+ }
+
+ return nullptr;
+}
+
+bool RISCVMCExpr::evaluatePCRelLo(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+ // VK_RISCV_PCREL_LO has to be handled specially. The MCExpr inside is
+ // actually the location of an auipc instruction with a VK_RISCV_PCREL_HI
+ // fixup pointing to the real target. We need to generate an MCValue in the
+ // form of (<real target> + <offset from this fixup to the auipc fixup>).
+ // The fixup is evaluated pc-relative to the VK_RISCV_PCREL_LO location, so
+ // we must add the offset from the VK_RISCV_PCREL_LO fixup to the
+ // VK_RISCV_PCREL_HI fixup to correct for this.
+ MCValue AUIPCLoc;
+ if (!getSubExpr()->evaluateAsValue(AUIPCLoc, *Layout))
+ return false;
+
+ const MCSymbolRefExpr *AUIPCSRE = AUIPCLoc.getSymA();
+ // Don't try to evaluate %pcrel_hi/%pcrel_lo pairs that cross fragment
+ // boundaries.
+ if (!AUIPCSRE ||
+ findAssociatedFragment() != AUIPCSRE->findAssociatedFragment())
+ return false;
+
+ const MCSymbol *AUIPCSymbol = &AUIPCSRE->getSymbol();
+ if (!AUIPCSymbol)
+ return false;
+
+ const MCFixup *TargetFixup = getPCRelHiFixup();
+ if (!TargetFixup)
+ return false;
+
+ if ((unsigned)TargetFixup->getKind() != RISCV::fixup_riscv_pcrel_hi20)
+ return false;
+
+ MCValue Target;
+ if (!TargetFixup->getValue()->evaluateAsValue(Target, *Layout))
+ return false;
+
+ if (!Target.getSymA() || !Target.getSymA()->getSymbol().isInSection())
+ return false;
+
+ if (&Target.getSymA()->getSymbol().getSection() !=
+ findAssociatedFragment()->getParent())
+ return false;
+
+ uint64_t AUIPCOffset = AUIPCSymbol->getOffset();
+
+ Res = MCValue::get(Target.getSymA(), nullptr,
+ Target.getConstant() + (Fixup->getOffset() - AUIPCOffset));
+ return true;
+}
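Editor's note: a concrete reading of the arithmetic above, assuming the auipc sits at offset 0x10 in the fragment and the dependent lo12 instruction at 0x18: Res evaluates to Target + 8, so when the assembler later subtracts the lo12 fixup's own PC, it recovers Target - 0x10, the delta relative to the auipc. A hypothetical sketch:

    #include <cstdint>

    // What the pcrel_lo fixup resolves to once its own PC is subtracted.
    uint64_t pcrelLoDelta(uint64_t Target, uint64_t AUIPCOffset,
                          uint64_t LoOffset) {
      uint64_t Res = Target + (LoOffset - AUIPCOffset); // as built above
      return Res - LoOffset; // == Target - AUIPCOffset, auipc-relative delta
    }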
+
bool RISCVMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
const MCAsmLayout *Layout,
const MCFixup *Fixup) const {
+ if (Kind == VK_RISCV_PCREL_LO && evaluatePCRelLo(Res, Layout, Fixup))
+ return true;
+
if (!getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup))
return false;
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
index d2e0f6b6cdae..4eafcc08b51f 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVMCExpr.h
@@ -39,6 +39,9 @@ private:
int64_t evaluateAsInt64(int64_t Value) const;
+ bool evaluatePCRelLo(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const;
+
explicit RISCVMCExpr(const MCExpr *Expr, VariantKind Kind)
: Expr(Expr), Kind(Kind) {}
@@ -50,6 +53,13 @@ public:
const MCExpr *getSubExpr() const { return Expr; }
+ /// Get the MCExpr of the VK_RISCV_PCREL_HI Fixup that the
+ /// VK_RISCV_PCREL_LO points to.
+ ///
+ /// \returns nullptr if this isn't a VK_RISCV_PCREL_LO pointing to a
+ /// VK_RISCV_PCREL_HI.
+ const MCFixup *getPCRelHiFixup() const;
+
void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
bool evaluateAsRelocatableImpl(MCValue &Res, const MCAsmLayout *Layout,
const MCFixup *Fixup) const override;
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 2d5205aa7ef7..8d5ef3dbd17f 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -23,6 +23,14 @@ RISCVTargetAsmStreamer::RISCVTargetAsmStreamer(MCStreamer &S,
formatted_raw_ostream &OS)
: RISCVTargetStreamer(S), OS(OS) {}
+void RISCVTargetAsmStreamer::emitDirectiveOptionPush() {
+ OS << "\t.option\tpush\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionPop() {
+ OS << "\t.option\tpop\n";
+}
+
void RISCVTargetAsmStreamer::emitDirectiveOptionRVC() {
OS << "\t.option\trvc\n";
}
@@ -30,3 +38,11 @@ void RISCVTargetAsmStreamer::emitDirectiveOptionRVC() {
void RISCVTargetAsmStreamer::emitDirectiveOptionNoRVC() {
OS << "\t.option\tnorvc\n";
}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionRelax() {
+ OS << "\t.option\trelax\n";
+}
+
+void RISCVTargetAsmStreamer::emitDirectiveOptionNoRelax() {
+ OS << "\t.option\tnorelax\n";
+}
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h b/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
index 525c20810f24..74ec9e303933 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
+++ b/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.h
@@ -18,8 +18,12 @@ class RISCVTargetStreamer : public MCTargetStreamer {
public:
RISCVTargetStreamer(MCStreamer &S);
+ virtual void emitDirectiveOptionPush() = 0;
+ virtual void emitDirectiveOptionPop() = 0;
virtual void emitDirectiveOptionRVC() = 0;
virtual void emitDirectiveOptionNoRVC() = 0;
+ virtual void emitDirectiveOptionRelax() = 0;
+ virtual void emitDirectiveOptionNoRelax() = 0;
};
// This part is for ascii assembly output
@@ -29,8 +33,12 @@ class RISCVTargetAsmStreamer : public RISCVTargetStreamer {
public:
RISCVTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+ void emitDirectiveOptionPush() override;
+ void emitDirectiveOptionPop() override;
void emitDirectiveOptionRVC() override;
void emitDirectiveOptionNoRVC() override;
+ void emitDirectiveOptionRelax() override;
+ void emitDirectiveOptionNoRelax() override;
};
}
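Editor's note: the push/pop hooks exist so a temporary option change cannot leak past the region that needs it. A hypothetical driver of the streamer interface declared above (the function name is assumed, not part of the patch):

    // Bracket a relaxation-sensitive region with .option push/pop.
    void emitNoRelaxRegion(llvm::RISCVTargetStreamer &TS) {
      TS.emitDirectiveOptionPush();    // .option push
      TS.emitDirectiveOptionNoRelax(); // .option norelax
      // ... emit code that must not be linker-relaxed ...
      TS.emitDirectiveOptionPop();     // .option pop
    }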
diff --git a/lib/Target/RISCV/RISCV.h b/lib/Target/RISCV/RISCV.h
index 2e4f536aca35..b25aee46200d 100644
--- a/lib/Target/RISCV/RISCV.h
+++ b/lib/Target/RISCV/RISCV.h
@@ -15,7 +15,8 @@
#ifndef LLVM_LIB_TARGET_RISCV_RISCV_H
#define LLVM_LIB_TARGET_RISCV_RISCV_H
-#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "Utils/RISCVBaseInfo.h"
+#include "llvm/Target/TargetMachine.h"
namespace llvm {
class RISCVTargetMachine;
@@ -36,6 +37,9 @@ FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM);
FunctionPass *createRISCVMergeBaseOffsetOptPass();
void initializeRISCVMergeBaseOffsetOptPass(PassRegistry &);
+
+FunctionPass *createRISCVExpandPseudoPass();
+void initializeRISCVExpandPseudoPass(PassRegistry &);
}
#endif
diff --git a/lib/Target/RISCV/RISCV.td b/lib/Target/RISCV/RISCV.td
index 281378cb2eee..0e86e2bc5e98 100644
--- a/lib/Target/RISCV/RISCV.td
+++ b/lib/Target/RISCV/RISCV.td
@@ -68,6 +68,12 @@ include "RISCVCallingConv.td"
include "RISCVInstrInfo.td"
//===----------------------------------------------------------------------===//
+// Named operands for CSR instructions.
+//===----------------------------------------------------------------------===//
+
+include "RISCVSystemOperands.td"
+
+//===----------------------------------------------------------------------===//
// RISC-V processors supported.
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
new file mode 100644
index 000000000000..35c185aa5edd
--- /dev/null
+++ b/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -0,0 +1,556 @@
+//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that expands pseudo instructions into target
+// instructions. This pass should be run after register allocation but before
+// the post-regalloc scheduling pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "RISCVTargetMachine.h"
+
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+using namespace llvm;
+
+#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"
+
+namespace {
+
+class RISCVExpandPseudo : public MachineFunctionPass {
+public:
+ const RISCVInstrInfo *TII;
+ static char ID;
+
+ RISCVExpandPseudo() : MachineFunctionPass(ID) {
+ initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }
+
+private:
+ bool expandMBB(MachineBasicBlock &MBB);
+ bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicBinOp(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
+ bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI);
+ bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI, bool IsMasked,
+ int Width, MachineBasicBlock::iterator &NextMBBI);
+};
+
+char RISCVExpandPseudo::ID = 0;
+
+bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+ TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ bool Modified = false;
+ for (auto &MBB : MF)
+ Modified |= expandMBB(MBB);
+ return Modified;
+}
+
+bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
+ bool Modified = false;
+
+ MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+ while (MBBI != E) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ Modified |= expandMI(MBB, MBBI, NMBBI);
+ MBBI = NMBBI;
+ }
+
+ return Modified;
+}
+
+bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI) {
+ switch (MBBI->getOpcode()) {
+ case RISCV::PseudoAtomicLoadNand32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicSwap32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadAdd32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadSub32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadNand32:
+ return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadMax32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadMin32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadUMax32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
+ NextMBBI);
+ case RISCV::PseudoMaskedAtomicLoadUMin32:
+ return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
+ NextMBBI);
+ case RISCV::PseudoCmpXchg32:
+ return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
+ case RISCV::PseudoMaskedCmpXchg32:
+ return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
+ }
+
+ return false;
+}
+
+static unsigned getLRForRMW32(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::LR_W;
+ case AtomicOrdering::Acquire:
+ return RISCV::LR_W_AQ;
+ case AtomicOrdering::Release:
+ return RISCV::LR_W;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::LR_W_AQ;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::LR_W_AQ_RL;
+ }
+}
+
+static unsigned getSCForRMW32(AtomicOrdering Ordering) {
+ switch (Ordering) {
+ default:
+ llvm_unreachable("Unexpected AtomicOrdering");
+ case AtomicOrdering::Monotonic:
+ return RISCV::SC_W;
+ case AtomicOrdering::Acquire:
+ return RISCV::SC_W;
+ case AtomicOrdering::Release:
+ return RISCV::SC_W_RL;
+ case AtomicOrdering::AcquireRelease:
+ return RISCV::SC_W_RL;
+ case AtomicOrdering::SequentiallyConsistent:
+ return RISCV::SC_W_AQ_RL;
+ }
+}
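Editor's note: illustrative use of the two helpers above. In the LR/SC loop, the acquire half of the ordering attaches to the load-reserved and the release half to the store-conditional, with seq_cst using aq+rl on both:

    // For an acq_rel atomicrmw:
    unsigned LR = getLRForRMW32(AtomicOrdering::AcquireRelease); // RISCV::LR_W_AQ
    unsigned SC = getSCForRMW32(AtomicOrdering::AcquireRelease); // RISCV::SC_W_RL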
+
+static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
+ DebugLoc DL, MachineBasicBlock *ThisMBB,
+ MachineBasicBlock *LoopMBB,
+ MachineBasicBlock *DoneMBB,
+ AtomicRMWInst::BinOp BinOp, int Width) {
+ assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned ScratchReg = MI.getOperand(1).getReg();
+ unsigned AddrReg = MI.getOperand(2).getReg();
+ unsigned IncrReg = MI.getOperand(3).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(4).getImm());
+
+ // .loop:
+ // lr.w dest, (addr)
+ // binop scratch, dest, val
+ // sc.w scratch, scratch, (addr)
+ // bnez scratch, loop
+ BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Nand:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-1);
+ break;
+ }
+ BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopMBB);
+}
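Editor's note: Nand is the only unmasked binop expanded here because RISC-V has no nand instruction; the loop computes it as AND followed by XORI -1. A plain-C restatement of the identity (hypothetical helper, single-threaded model):

    #include <cstdint>

    int32_t nand32(int32_t Dest, int32_t Incr) {
      int32_t Scratch = Dest & Incr; // and  scratch, dest, incr
      return Scratch ^ -1;           // xori scratch, scratch, -1 => ~(Dest & Incr)
    }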
+
+static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
+ MachineBasicBlock *MBB, unsigned DestReg,
+ unsigned OldValReg, unsigned NewValReg,
+ unsigned MaskReg, unsigned ScratchReg) {
+ assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
+ assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
+ assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");
+
+ // We select bits from newval and oldval using:
+ // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
+ // r = oldval ^ ((oldval ^ newval) & masktargetdata);
+ BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
+ .addReg(OldValReg)
+ .addReg(NewValReg);
+ BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(ScratchReg)
+ .addReg(MaskReg);
+ BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
+ .addReg(OldValReg)
+ .addReg(ScratchReg);
+}
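Editor's note: the cited bithack selects NewVal's bits where Mask is 1 and OldVal's bits where Mask is 0, using two XORs and one AND so a single scratch register suffices. A direct C model (hypothetical name):

    #include <cstdint>

    uint32_t maskedMerge(uint32_t OldVal, uint32_t NewVal, uint32_t Mask) {
      return OldVal ^ ((OldVal ^ NewVal) & Mask);
    }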
+
+static void doMaskedAtomicBinOpExpansion(
+ const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
+ MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
+ MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
+ assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned ScratchReg = MI.getOperand(1).getReg();
+ unsigned AddrReg = MI.getOperand(2).getReg();
+ unsigned IncrReg = MI.getOperand(3).getReg();
+ unsigned MaskReg = MI.getOperand(4).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(5).getImm());
+
+ // .loop:
+ // lr.w destreg, (alignedaddr)
+ // binop scratch, destreg, incr
+ // xor scratch, destreg, scratch
+ // and scratch, scratch, masktargetdata
+ // xor scratch, destreg, scratch
+ // sc.w scratch, scratch, (alignedaddr)
+ // bnez scratch, loop
+ BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
+ .addReg(RISCV::X0)
+ .addReg(IncrReg);
+ break;
+ case AtomicRMWInst::Add:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ break;
+ case AtomicRMWInst::Sub:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ break;
+ case AtomicRMWInst::Nand:
+ BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(IncrReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-1);
+ break;
+ }
+
+ insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
+ ScratchReg);
+
+ BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopMBB);
+}
+
+bool RISCVExpandPseudo::expandAtomicBinOp(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI) {
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+
+ MachineFunction *MF = MBB.getParent();
+ auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Insert new MBBs.
+ MF->insert(++MBB.getIterator(), LoopMBB);
+ MF->insert(++LoopMBB->getIterator(), DoneMBB);
+
+ // Set up successors and transfer remaining instructions to DoneMBB.
+ LoopMBB->addSuccessor(LoopMBB);
+ LoopMBB->addSuccessor(DoneMBB);
+ DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+ DoneMBB->transferSuccessors(&MBB);
+ MBB.addSuccessor(LoopMBB);
+
+ if (!IsMasked)
+ doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
+ else
+ doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
+ Width);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *LoopMBB);
+ computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+ return true;
+}
+
+static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
+ MachineBasicBlock *MBB, unsigned ValReg,
+ unsigned ShamtReg) {
+ BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
+ .addReg(ValReg)
+ .addReg(ShamtReg);
+ BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
+ .addReg(ValReg)
+ .addReg(ShamtReg);
+}
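Editor's note: the sll/sra pair is the classic in-register sign extension: shift the field's sign bit up to bit XLEN-1, then shift back arithmetically. A C sketch (hypothetical name; assumes arithmetic right shift of signed values, which holds on the targets LLVM supports):

    #include <cstdint>

    int32_t sextLowBits(uint32_t Val, unsigned Width) {
      unsigned Shamt = 32 - Width;
      return int32_t(Val << Shamt) >> Shamt;
    }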
+
+bool RISCVExpandPseudo::expandAtomicMinMaxOp(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
+ MachineBasicBlock::iterator &NextMBBI) {
+ assert(IsMasked == true &&
+ "Should only need to expand masked atomic max/min");
+ assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Insert new MBBs.
+ MF->insert(++MBB.getIterator(), LoopHeadMBB);
+ MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
+ MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
+ MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
+
+ // Set up successors and transfer remaining instructions to DoneMBB.
+ LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
+ LoopHeadMBB->addSuccessor(LoopTailMBB);
+ LoopIfBodyMBB->addSuccessor(LoopTailMBB);
+ LoopTailMBB->addSuccessor(LoopHeadMBB);
+ LoopTailMBB->addSuccessor(DoneMBB);
+ DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+ DoneMBB->transferSuccessors(&MBB);
+ MBB.addSuccessor(LoopHeadMBB);
+
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned Scratch1Reg = MI.getOperand(1).getReg();
+ unsigned Scratch2Reg = MI.getOperand(2).getReg();
+ unsigned AddrReg = MI.getOperand(3).getReg();
+ unsigned IncrReg = MI.getOperand(4).getReg();
+ unsigned MaskReg = MI.getOperand(5).getReg();
+ bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());
+
+ //
+ // .loophead:
+ // lr.w destreg, (alignedaddr)
+ // and scratch2, destreg, mask
+ // mv scratch1, destreg
+ // [sext scratch2 if signed min/max]
+ // ifnochangeneeded scratch2, incr, .looptail
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
+ .addReg(DestReg)
+ .addReg(MaskReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
+ .addReg(DestReg)
+ .addImm(0);
+
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Max: {
+ insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
+ .addReg(Scratch2Reg)
+ .addReg(IncrReg)
+ .addMBB(LoopTailMBB);
+ break;
+ }
+ case AtomicRMWInst::Min: {
+ insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
+ .addReg(IncrReg)
+ .addReg(Scratch2Reg)
+ .addMBB(LoopTailMBB);
+ break;
+ }
+ case AtomicRMWInst::UMax:
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
+ .addReg(Scratch2Reg)
+ .addReg(IncrReg)
+ .addMBB(LoopTailMBB);
+ break;
+ case AtomicRMWInst::UMin:
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
+ .addReg(IncrReg)
+ .addReg(Scratch2Reg)
+ .addMBB(LoopTailMBB);
+ break;
+ }
+
+ // .loopifbody:
+ // xor scratch1, destreg, incr
+ // and scratch1, scratch1, mask
+ // xor scratch1, destreg, scratch1
+ insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
+ MaskReg, Scratch1Reg);
+
+ // .looptail:
+ // sc.w scratch1, scratch1, (addr)
+ // bnez scratch1, loop
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
+ .addReg(AddrReg)
+ .addReg(Scratch1Reg);
+ BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+ .addReg(Scratch1Reg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopHeadMBB);
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
+ computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
+ computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
+ computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+ return true;
+}
+
+bool RISCVExpandPseudo::expandAtomicCmpXchg(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
+ int Width, MachineBasicBlock::iterator &NextMBBI) {
+ assert(Width == 32 && "RV64 atomic expansion currently unsupported");
+ MachineInstr &MI = *MBBI;
+ DebugLoc DL = MI.getDebugLoc();
+ MachineFunction *MF = MBB.getParent();
+ auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+ auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
+
+ // Insert new MBBs.
+ MF->insert(++MBB.getIterator(), LoopHeadMBB);
+ MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
+ MF->insert(++LoopTailMBB->getIterator(), DoneMBB);
+
+ // Set up successors and transfer remaining instructions to DoneMBB.
+ LoopHeadMBB->addSuccessor(LoopTailMBB);
+ LoopHeadMBB->addSuccessor(DoneMBB);
+ LoopTailMBB->addSuccessor(DoneMBB);
+ LoopTailMBB->addSuccessor(LoopHeadMBB);
+ DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
+ DoneMBB->transferSuccessors(&MBB);
+ MBB.addSuccessor(LoopHeadMBB);
+
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned ScratchReg = MI.getOperand(1).getReg();
+ unsigned AddrReg = MI.getOperand(2).getReg();
+ unsigned CmpValReg = MI.getOperand(3).getReg();
+ unsigned NewValReg = MI.getOperand(4).getReg();
+ AtomicOrdering Ordering =
+ static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());
+
+ if (!IsMasked) {
+ // .loophead:
+ // lr.w dest, (addr)
+ // bne dest, cmpval, done
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
+ .addReg(DestReg)
+ .addReg(CmpValReg)
+ .addMBB(DoneMBB);
+ // .looptail:
+ // sc.w scratch, newval, (addr)
+ // bnez scratch, loophead
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(NewValReg);
+ BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopHeadMBB);
+ } else {
+ // .loophead:
+ // lr.w dest, (addr)
+ // and scratch, dest, mask
+ // bne scratch, cmpval, done
+ unsigned MaskReg = MI.getOperand(5).getReg();
+ BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
+ .addReg(AddrReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
+ .addReg(DestReg)
+ .addReg(MaskReg);
+ BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(CmpValReg)
+ .addMBB(DoneMBB);
+
+ // .looptail:
+ // xor scratch, dest, newval
+ // and scratch, scratch, mask
+ // xor scratch, dest, scratch
+ // sc.w scratch, scratch, (addr)
+ // bnez scratch, loophead
+ insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
+ MaskReg, ScratchReg);
+ BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
+ .addReg(AddrReg)
+ .addReg(ScratchReg);
+ BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
+ .addReg(ScratchReg)
+ .addReg(RISCV::X0)
+ .addMBB(LoopHeadMBB);
+ }
+
+ NextMBBI = MBB.end();
+ MI.eraseFromParent();
+
+ LivePhysRegs LiveRegs;
+ computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
+ computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
+ computeAndAddLiveIns(LiveRegs, *DoneMBB);
+
+ return true;
+}
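Editor's note: for intuition, the unmasked loop above behaves like the following single-threaded model; on hardware the lr.w/sc.w pair supplies the atomicity and a failed sc.w branches back to .loophead (hypothetical name, illustration only):

    #include <cstdint>

    int32_t cmpxchg32(int32_t *Addr, int32_t CmpVal, int32_t NewVal) {
      int32_t Dest = *Addr;  // lr.w dest, (addr)
      if (Dest == CmpVal)    // bne  dest, cmpval, .done
        *Addr = NewVal;      // sc.w scratch, newval, (addr); bnez retries
      return Dest;
    }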
+
+} // end of anonymous namespace
+
+INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
+ RISCV_EXPAND_PSEUDO_NAME, false, false)
+namespace llvm {
+
+FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }
+
+} // end of namespace llvm
diff --git a/lib/Target/RISCV/RISCVFrameLowering.cpp b/lib/Target/RISCV/RISCVFrameLowering.cpp
index a816028f9d8b..74417899c8da 100644
--- a/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -148,8 +148,7 @@ void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
// Skip to before the restores of callee-saved registers
// FIXME: assumes exactly one instruction is used to restore each
// callee-saved register.
- MachineBasicBlock::iterator LastFrameDestroy = MBBI;
- std::advance(LastFrameDestroy, -MFI.getCalleeSavedInfo().size());
+ auto LastFrameDestroy = std::prev(MBBI, MFI.getCalleeSavedInfo().size());
uint64_t StackSize = MFI.getStackSize();
diff --git a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 04441b9a9b15..aa80365feb83 100644
--- a/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "RISCV.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "RISCV.h"
#include "RISCVTargetMachine.h"
+#include "Utils/RISCVMatInt.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
@@ -56,20 +57,47 @@ public:
private:
void doPeepholeLoadStoreADDI();
- void doPeepholeBuildPairF64SplitF64();
};
}
void RISCVDAGToDAGISel::PostprocessISelDAG() {
doPeepholeLoadStoreADDI();
- doPeepholeBuildPairF64SplitF64();
}
-void RISCVDAGToDAGISel::Select(SDNode *Node) {
- unsigned Opcode = Node->getOpcode();
- MVT XLenVT = Subtarget->getXLenVT();
+static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
+ MVT XLenVT) {
+ RISCVMatInt::InstSeq Seq;
+ RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64, Seq);
+
+ SDNode *Result;
+ SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
+ for (RISCVMatInt::Inst &Inst : Seq) {
+ SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
+ if (Inst.Opc == RISCV::LUI)
+ Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
+ else
+ Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);
+
+ // Only the first instruction has X0 as its source.
+ SrcReg = SDValue(Result, 0);
+ }
+
+ return Result;
+}
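Editor's note: generateInstSeq splits constants into LUI/ADDI-style pairs; because ADDI sign-extends its 12-bit immediate, the high part must first be rounded up by 0x800. A self-contained sketch of the RV32 split (RV64 constants need longer sequences):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t Imm = (int32_t)0xdeadbeef;
      uint32_t Hi20 = ((uint32_t)Imm + 0x800) >> 12;       // lui  rd, 0xdeadc
      int32_t Lo12 = (int32_t)((uint32_t)Imm << 20) >> 20; // addi rd, rd, -273
      int32_t Rebuilt = (int32_t)(Hi20 << 12) + Lo12;
      printf("%d\n", Rebuilt == Imm);                      // prints 1
    }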
+
+// Returns true if the Node is an ISD::AND with a constant argument. If so,
+// sets Mask to that constant value.
+static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
+ if (Node->getOpcode() == ISD::AND &&
+ Node->getOperand(1).getOpcode() == ISD::Constant) {
+ Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+ return true;
+ }
+ return false;
+}
- // If we have a custom node, we have already selected
+void RISCVDAGToDAGISel::Select(SDNode *Node) {
+ // If we have a custom node, we have already selected.
if (Node->isMachineOpcode()) {
LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
Node->setNodeId(-1);
@@ -78,27 +106,58 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Instruction Selection not handled by the auto-generated tablegen selection
// should be handled here.
+ unsigned Opcode = Node->getOpcode();
+ MVT XLenVT = Subtarget->getXLenVT();
+ SDLoc DL(Node);
EVT VT = Node->getValueType(0);
- if (Opcode == ISD::Constant && VT == XLenVT) {
- auto *ConstNode = cast<ConstantSDNode>(Node);
- // Materialize zero constants as copies from X0. This allows the coalescer
- // to propagate these into other instructions.
- if (ConstNode->isNullValue()) {
+
+ switch (Opcode) {
+ case ISD::Constant: {
+ auto ConstNode = cast<ConstantSDNode>(Node);
+ if (VT == XLenVT && ConstNode->isNullValue()) {
SDValue New = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), SDLoc(Node),
RISCV::X0, XLenVT);
ReplaceNode(Node, New.getNode());
return;
}
+ int64_t Imm = ConstNode->getSExtValue();
+ if (XLenVT == MVT::i64) {
+ ReplaceNode(Node, selectImm(CurDAG, SDLoc(Node), Imm, XLenVT));
+ return;
+ }
+ break;
}
- if (Opcode == ISD::FrameIndex) {
- SDLoc DL(Node);
+ case ISD::FrameIndex: {
SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
- EVT VT = Node->getValueType(0);
SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
return;
}
+ case ISD::SRL: {
+ if (!Subtarget->is64Bit())
+ break;
+ SDValue Op0 = Node->getOperand(0);
+ SDValue Op1 = Node->getOperand(1);
+ uint64_t Mask;
+ // Match (srl (and val, mask), imm) where the result would be a
+ // zero-extended 32-bit integer, i.e. the mask is 0xffffffff or the result
+ // is equivalent to this (SimplifyDemandedBits may have removed lower bits
+ // from the mask that aren't necessary due to the right-shifting).
+ if (Op1.getOpcode() == ISD::Constant &&
+ isConstantMask(Op0.getNode(), Mask)) {
+ uint64_t ShAmt = cast<ConstantSDNode>(Op1.getNode())->getZExtValue();
+
+ if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
+ SDValue ShAmtVal =
+ CurDAG->getTargetConstant(ShAmt, SDLoc(Node), XLenVT);
+ CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0.getOperand(0),
+ ShAmtVal);
+ return;
+ }
+ }
+ }
+ }
// Select the default instruction.
SelectCode(Node);
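Editor's note: the mask test in the SRL case above tolerates cleared low bits because the shift discards them anyway: with ShAmt = 4, a mask of 0xfffffff0 produces the same result as 0xffffffff. A standalone restatement of the condition (hypothetical name):

    #include <cstdint>

    bool srliwMaskOK(uint64_t Mask, uint64_t ShAmt) {
      uint64_t TrailingOnes = (1ULL << ShAmt) - 1; // maskTrailingOnes<uint64_t>(ShAmt)
      return (Mask | TrailingOnes) == 0xffffffff;
    }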
@@ -216,43 +275,6 @@ void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
}
}
-// Remove redundant BuildPairF64+SplitF64 pairs. i.e. cases where an f64 is
-// built of two i32 values, only to be split apart again. This must be done
-// here as a peephole optimisation as the DAG has not been fully legalized at
-// the point BuildPairF64/SplitF64 nodes are created in RISCVISelLowering, so
-// some nodes would not yet have been replaced with libcalls.
-void RISCVDAGToDAGISel::doPeepholeBuildPairF64SplitF64() {
- SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
- ++Position;
-
- while (Position != CurDAG->allnodes_begin()) {
- SDNode *N = &*--Position;
- // Skip dead nodes and any nodes other than SplitF64Pseudo.
- if (N->use_empty() || !N->isMachineOpcode() ||
- !(N->getMachineOpcode() == RISCV::SplitF64Pseudo))
- continue;
-
- // If the operand to SplitF64 is a BuildPairF64, the split operation is
- // redundant. Just use the operands to BuildPairF64 as the result.
- SDValue F64Val = N->getOperand(0);
- if (F64Val.isMachineOpcode() &&
- F64Val.getMachineOpcode() == RISCV::BuildPairF64Pseudo) {
- LLVM_DEBUG(
- dbgs() << "Removing redundant SplitF64Pseudo and replacing uses "
- "with BuildPairF64Pseudo operands:\n");
- LLVM_DEBUG(dbgs() << "N: ");
- LLVM_DEBUG(N->dump(CurDAG));
- LLVM_DEBUG(dbgs() << "F64Val: ");
- LLVM_DEBUG(F64Val->dump(CurDAG));
- LLVM_DEBUG(dbgs() << "\n");
- SDValue From[] = {SDValue(N, 0), SDValue(N, 1)};
- SDValue To[] = {F64Val.getOperand(0), F64Val.getOperand(1)};
- CurDAG->ReplaceAllUsesOfValuesWith(From, To, 2);
- }
- }
- CurDAG->RemoveDeadNodes();
-}
-
// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
diff --git a/lib/Target/RISCV/RISCVISelLowering.cpp b/lib/Target/RISCV/RISCVISelLowering.cpp
index 87796e5b1097..508dcbd009ed 100644
--- a/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -80,6 +80,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
+ if (Subtarget.is64Bit()) {
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SRL);
+ setTargetDAGCombine(ISD::SRA);
+ setTargetDAGCombine(ISD::ANY_EXTEND);
+ }
+
if (!Subtarget.hasStdExtM()) {
setOperationAction(ISD::MUL, XLenVT, Expand);
setOperationAction(ISD::MULHS, XLenVT, Expand);
@@ -111,6 +118,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE,
ISD::SETGT, ISD::SETGE, ISD::SETNE};
+ ISD::NodeType FPOpToExtend[] = {
+ ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM};
+
if (Subtarget.hasStdExtF()) {
setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
@@ -119,6 +129,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Custom);
setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ for (auto Op : FPOpToExtend)
+ setOperationAction(Op, MVT::f32, Expand);
}
if (Subtarget.hasStdExtD()) {
@@ -131,16 +143,20 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_CC, MVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ for (auto Op : FPOpToExtend)
+ setOperationAction(Op, MVT::f64, Expand);
}
setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
setOperationAction(ISD::BlockAddress, XLenVT, Custom);
setOperationAction(ISD::ConstantPool, XLenVT, Custom);
- if (Subtarget.hasStdExtA())
+ if (Subtarget.hasStdExtA()) {
setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
- else
+ setMinCmpXchgSizeInBits(32);
+ } else {
setMaxAtomicSizeInBitsSupported(0);
+ }
setBooleanContents(ZeroOrOneBooleanContent);
@@ -160,6 +176,34 @@ EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
return VT.changeVectorElementTypeToInteger();
}
+bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+ const CallInst &I,
+ MachineFunction &MF,
+ unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+ case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
+ case Intrinsic::riscv_masked_atomicrmw_add_i32:
+ case Intrinsic::riscv_masked_atomicrmw_sub_i32:
+ case Intrinsic::riscv_masked_atomicrmw_nand_i32:
+ case Intrinsic::riscv_masked_atomicrmw_max_i32:
+ case Intrinsic::riscv_masked_atomicrmw_min_i32:
+ case Intrinsic::riscv_masked_atomicrmw_umax_i32:
+ case Intrinsic::riscv_masked_atomicrmw_umin_i32:
+ case Intrinsic::riscv_masked_cmpxchg_i32:
+ PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = MVT::getVT(PtrTy->getElementType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.align = 4;
+ Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
+ MachineMemOperand::MOVolatile;
+ return true;
+ }
+}
+
bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS,
@@ -228,6 +272,10 @@ bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
return TargetLowering::isZExtFree(Val, VT2);
}
+bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
+ return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
+}
+
// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
@@ -283,9 +331,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::VASTART:
return lowerVASTART(Op, DAG);
case ISD::FRAMEADDR:
- return LowerFRAMEADDR(Op, DAG);
+ return lowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR:
- return LowerRETURNADDR(Op, DAG);
+ return lowerRETURNADDR(Op, DAG);
}
}
@@ -298,7 +346,7 @@ SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
int64_t Offset = N->getOffset();
MVT XLenVT = Subtarget.getXLenVT();
- if (isPositionIndependent() || Subtarget.is64Bit())
+ if (isPositionIndependent())
report_fatal_error("Unable to lowerGlobalAddress");
// In order to maximise the opportunity for common subexpression elimination,
// emit a separate ADD node for the global address offset instead of folding
@@ -323,7 +371,7 @@ SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
const BlockAddress *BA = N->getBlockAddress();
int64_t Offset = N->getOffset();
- if (isPositionIndependent() || Subtarget.is64Bit())
+ if (isPositionIndependent())
report_fatal_error("Unable to lowerBlockAddress");
SDValue BAHi = DAG.getTargetBlockAddress(BA, Ty, Offset, RISCVII::MO_HI);
@@ -357,26 +405,6 @@ SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
}
}
-SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT Ty = Op.getValueType();
- ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
- const char *Sym = N->getSymbol();
-
- // TODO: should also handle gp-relative loads.
-
- if (isPositionIndependent() || Subtarget.is64Bit())
- report_fatal_error("Unable to lowerExternalSymbol");
-
- SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
- SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
- SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
- SDValue MNLo =
- SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
- return MNLo;
-}
-
SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
SDValue CondV = Op.getOperand(0);
SDValue TrueV = Op.getOperand(1);
@@ -432,7 +460,7 @@ SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo(SV));
}
-SDValue RISCVTargetLowering::LowerFRAMEADDR(SDValue Op,
+SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
@@ -455,7 +483,7 @@ SDValue RISCVTargetLowering::LowerFRAMEADDR(SDValue Op,
return FrameAddr;
}
-SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
+SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
MachineFunction &MF = DAG.getMachineFunction();
@@ -472,7 +500,7 @@ SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
int Off = -XLenInBytes;
- SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+ SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
SDValue Offset = DAG.getConstant(Off, DL, VT);
return DAG.getLoad(VT, DL, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
@@ -485,6 +513,84 @@ SDValue RISCVTargetLowering::LowerRETURNADDR(SDValue Op,
return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}
+// Return true if the given node is a shift with a non-constant shift amount.
+static bool isVariableShift(SDValue Val) {
+ switch (Val.getOpcode()) {
+ default:
+ return false;
+ case ISD::SHL:
+ case ISD::SRA:
+ case ISD::SRL:
+ return Val.getOperand(1).getOpcode() != ISD::Constant;
+ }
+}
+
+// Returns true if the given node is an sdiv, udiv, or urem with non-constant
+// operands.
+static bool isVariableSDivUDivURem(SDValue Val) {
+ switch (Val.getOpcode()) {
+ default:
+ return false;
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::UREM:
+ return Val.getOperand(0).getOpcode() != ISD::Constant &&
+ Val.getOperand(1).getOpcode() != ISD::Constant;
+ }
+}
+
+SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ SelectionDAG &DAG = DCI.DAG;
+
+ switch (N->getOpcode()) {
+ default:
+ break;
+ case ISD::SHL:
+ case ISD::SRL:
+ case ISD::SRA: {
+ assert(Subtarget.getXLen() == 64 && "Combine should be 64-bit only");
+ if (!DCI.isBeforeLegalize())
+ break;
+ SDValue RHS = N->getOperand(1);
+ if (N->getValueType(0) != MVT::i32 || RHS->getOpcode() == ISD::Constant ||
+ (RHS->getOpcode() == ISD::AssertZext &&
+ cast<VTSDNode>(RHS->getOperand(1))->getVT().getSizeInBits() <= 5))
+ break;
+ SDValue LHS = N->getOperand(0);
+ SDLoc DL(N);
+ SDValue NewRHS =
+ DAG.getNode(ISD::AssertZext, DL, RHS.getValueType(), RHS,
+ DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 5)));
+ return DCI.CombineTo(
+ N, DAG.getNode(N->getOpcode(), DL, LHS.getValueType(), LHS, NewRHS));
+ }
+ case ISD::ANY_EXTEND: {
+ // If any-extending an i32 variable-amount shift or sdiv/udiv/urem to i64,
+ // then instead sign-extend in order to increase the chance of being able
+ // to select the sllw/srlw/sraw/divw/divuw/remuw instructions.
+ SDValue Src = N->getOperand(0);
+ if (N->getValueType(0) != MVT::i64 || Src.getValueType() != MVT::i32)
+ break;
+ if (!isVariableShift(Src) &&
+ !(Subtarget.hasStdExtM() && isVariableSDivUDivURem(Src)))
+ break;
+ SDLoc DL(N);
+ return DCI.CombineTo(N, DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src));
+ }
+ case RISCVISD::SplitF64: {
+ // If the input to SplitF64 is just BuildPairF64 then the operation is
+ // redundant. Instead, use BuildPairF64's operands directly.
+ SDValue Op0 = N->getOperand(0);
+ if (Op0->getOpcode() != RISCVISD::BuildPairF64)
+ break;
+ return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
+ }
+ }
+
+ return SDValue();
+}
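Editor's note: the ANY_EXTEND combine pays off because RV64's *W instructions operate on the low 32 bits and sign-extend their 32-bit result to 64 bits, so a sign-extended input matches what they produce and consume. A C model of divw's result convention (hypothetical name; assumes B != 0 and no INT32_MIN/-1 overflow):

    #include <cstdint>

    int64_t divwModel(int64_t A, int64_t B) {
      return (int64_t)((int32_t)A / (int32_t)B); // 32-bit result, sign-extended
    }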
+
static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
MachineBasicBlock *BB) {
assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
@@ -807,10 +913,14 @@ static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
if (Reg) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
- } else {
- State.addLoc(
- CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
+ return false;
}
+
+ if (ValVT == MVT::f32) {
+ LocVT = MVT::f32;
+ LocInfo = CCValAssign::Full;
+ }
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
return false;
}
@@ -859,6 +969,22 @@ void RISCVTargetLowering::analyzeOutputArgs(
}
}
+// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
+// values.
+static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
+ const CCValAssign &VA, const SDLoc &DL) {
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unexpected CCValAssign::LocInfo");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::BCvt:
+ Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
+ break;
+ }
+ return Val;
+}
+
// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
@@ -866,21 +992,29 @@ static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
MachineFunction &MF = DAG.getMachineFunction();
MachineRegisterInfo &RegInfo = MF.getRegInfo();
EVT LocVT = VA.getLocVT();
- EVT ValVT = VA.getValVT();
SDValue Val;
unsigned VReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
+ if (VA.getLocInfo() == CCValAssign::Indirect)
+ return Val;
+
+ return convertLocVTToValVT(DAG, Val, VA, DL);
+}
+
+static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
+ const CCValAssign &VA, const SDLoc &DL) {
+ EVT LocVT = VA.getLocVT();
+
switch (VA.getLocInfo()) {
default:
llvm_unreachable("Unexpected CCValAssign::LocInfo");
case CCValAssign::Full:
- case CCValAssign::Indirect:
break;
case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
+ Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
break;
}
return Val;
@@ -995,7 +1129,6 @@ SDValue RISCVTargetLowering::LowerFormalArguments(
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
- assert(VA.getLocVT() == XLenVT && "Unhandled argument type");
SDValue ArgValue;
// Passing f64 on RV32D with a soft float ABI must be handled as a special
// case.
@@ -1282,13 +1415,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Promote the value if needed.
// For now, only handle fully promoted and indirect arguments.
- switch (VA.getLocInfo()) {
- case CCValAssign::Full:
- break;
- case CCValAssign::BCvt:
- ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), ArgValue);
- break;
- case CCValAssign::Indirect: {
+ if (VA.getLocInfo() == CCValAssign::Indirect) {
// Store the argument in a stack slot and pass its address.
SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
@@ -1310,10 +1437,8 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
++i;
}
ArgValue = SpillSlot;
- break;
- }
- default:
- llvm_unreachable("Unknown loc info!");
+ } else {
+ ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
}
// Use local copy if it is a byval arg.
@@ -1415,6 +1540,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
// Glue the RetValue to the end of the call sequence
Chain = RetValue.getValue(1);
Glue = RetValue.getValue(2);
+
if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
SDValue RetValue2 =
@@ -1425,15 +1551,7 @@ SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
RetValue2);
}
- switch (VA.getLocInfo()) {
- default:
- llvm_unreachable("Unknown loc info!");
- case CCValAssign::Full:
- break;
- case CCValAssign::BCvt:
- RetValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), RetValue);
- break;
- }
+ RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
InVals.push_back(RetValue);
}
@@ -1456,22 +1574,6 @@ bool RISCVTargetLowering::CanLowerReturn(
return true;
}
-static SDValue packIntoRegLoc(SelectionDAG &DAG, SDValue Val,
- const CCValAssign &VA, const SDLoc &DL) {
- EVT LocVT = VA.getLocVT();
-
- switch (VA.getLocInfo()) {
- default:
- llvm_unreachable("Unexpected CCValAssign::LocInfo");
- case CCValAssign::Full:
- break;
- case CCValAssign::BCvt:
- Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
- break;
- }
- return Val;
-}
-
SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool IsVarArg,
@@ -1514,7 +1616,7 @@ RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
} else {
// Handle a 'normal' return.
- Val = packIntoRegLoc(DAG, Val, VA, DL);
+ Val = convertValVTToLocVT(DAG, Val, VA, DL);
Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
// Guarantee that all emitted copies are stuck together.
@@ -1616,3 +1718,83 @@ Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
return Builder.CreateFence(AtomicOrdering::Acquire);
return nullptr;
}
+
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ unsigned Size = AI->getType()->getPrimitiveSizeInBits();
+ if (Size == 8 || Size == 16)
+ return AtomicExpansionKind::MaskedIntrinsic;
+ return AtomicExpansionKind::None;
+}
+
+static Intrinsic::ID
+getIntrinsicForMaskedAtomicRMWBinOp32(AtomicRMWInst::BinOp BinOp) {
+ switch (BinOp) {
+ default:
+ llvm_unreachable("Unexpected AtomicRMW BinOp");
+ case AtomicRMWInst::Xchg:
+ return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
+ case AtomicRMWInst::Add:
+ return Intrinsic::riscv_masked_atomicrmw_add_i32;
+ case AtomicRMWInst::Sub:
+ return Intrinsic::riscv_masked_atomicrmw_sub_i32;
+ case AtomicRMWInst::Nand:
+ return Intrinsic::riscv_masked_atomicrmw_nand_i32;
+ case AtomicRMWInst::Max:
+ return Intrinsic::riscv_masked_atomicrmw_max_i32;
+ case AtomicRMWInst::Min:
+ return Intrinsic::riscv_masked_atomicrmw_min_i32;
+ case AtomicRMWInst::UMax:
+ return Intrinsic::riscv_masked_atomicrmw_umax_i32;
+ case AtomicRMWInst::UMin:
+ return Intrinsic::riscv_masked_atomicrmw_umin_i32;
+ }
+}
+
+Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
+ IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
+ Value *Ordering = Builder.getInt32(static_cast<uint32_t>(AI->getOrdering()));
+ Type *Tys[] = {AlignedAddr->getType()};
+ Function *LrwOpScwLoop = Intrinsic::getDeclaration(
+ AI->getModule(),
+ getIntrinsicForMaskedAtomicRMWBinOp32(AI->getOperation()), Tys);
+
+ // Must pass the shift amount needed to sign extend the loaded value prior
+ // to performing a signed comparison for min/max. ShiftAmt is the number of
+ // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
+ // is the number of bits to left+right shift the value in order to
+ // sign-extend.
+ if (AI->getOperation() == AtomicRMWInst::Min ||
+ AI->getOperation() == AtomicRMWInst::Max) {
+ const DataLayout &DL = AI->getModule()->getDataLayout();
+ unsigned ValWidth =
+ DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
+ Value *SextShamt = Builder.CreateSub(
+ Builder.getInt32(Subtarget.getXLen() - ValWidth), ShiftAmt);
+ return Builder.CreateCall(LrwOpScwLoop,
+ {AlignedAddr, Incr, Mask, SextShamt, Ordering});
+ }
+
+ return Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
+}
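Editor's note: concrete numbers for the SextShamt computation above. An i8 field at bit offset 8 on RV32 gives ShiftAmt = 8 and ValWidth = 8, so SextShamt = (32 - 8) - 8 = 16, and an sll/sra by 16 sign-extends bits [8,16) while leaving them in position for the masked comparison. A sketch (hypothetical name):

    #include <cstdint>

    int32_t sextInPlace(uint32_t Loaded, unsigned SextShamt) {
      return (int32_t)(Loaded << SextShamt) >> SextShamt;
    }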
+
+TargetLowering::AtomicExpansionKind
+RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
+ AtomicCmpXchgInst *CI) const {
+ unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
+ if (Size == 8 || Size == 16)
+ return AtomicExpansionKind::MaskedIntrinsic;
+ return AtomicExpansionKind::None;
+}
+
+Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
+ IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
+ Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
+ Value *Ordering = Builder.getInt32(static_cast<uint32_t>(Ord));
+ Type *Tys[] = {AlignedAddr->getType()};
+ Function *MaskedCmpXchg = Intrinsic::getDeclaration(
+ CI->getModule(), Intrinsic::riscv_masked_cmpxchg_i32, Tys);
+ return Builder.CreateCall(MaskedCmpXchg,
+ {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
+}
diff --git a/lib/Target/RISCV/RISCVISelLowering.h b/lib/Target/RISCV/RISCVISelLowering.h
index 280adb29fd02..6970900bb062 100644
--- a/lib/Target/RISCV/RISCVISelLowering.h
+++ b/lib/Target/RISCV/RISCVISelLowering.h
@@ -43,6 +43,9 @@ public:
explicit RISCVTargetLowering(const TargetMachine &TM,
const RISCVSubtarget &STI);
+ bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
+ MachineFunction &MF,
+ unsigned Intrinsic) const override;
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
unsigned AS,
Instruction *I = nullptr) const override;
@@ -51,10 +54,13 @@ public:
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
+ bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
// Provide custom lowering hooks for some operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+
// This method returns the name of a target specific DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
@@ -107,15 +113,27 @@ private:
SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
bool IsEligibleForTailCallOptimization(CCState &CCInfo,
CallLoweringInfo &CLI, MachineFunction &MF,
const SmallVector<CCValAssign, 16> &ArgLocs) const;
+
+ TargetLowering::AtomicExpansionKind
+ shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+ virtual Value *emitMaskedAtomicRMWIntrinsic(
+ IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
+ Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override;
+ TargetLowering::AtomicExpansionKind
+ shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
+ virtual Value *
+ emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, AtomicCmpXchgInst *CI,
+ Value *AlignedAddr, Value *CmpVal,
+ Value *NewVal, Value *Mask,
+ AtomicOrdering Ord) const override;
};
}
diff --git a/lib/Target/RISCV/RISCVInstrFormats.td b/lib/Target/RISCV/RISCVInstrFormats.td
index 529e048045c6..ebd676a6056e 100644
--- a/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/lib/Target/RISCV/RISCVInstrFormats.td
@@ -45,11 +45,12 @@ def InstFormatCSS : InstFormat<10>;
def InstFormatCIW : InstFormat<11>;
def InstFormatCL : InstFormat<12>;
def InstFormatCS : InstFormat<13>;
-def InstFormatCB : InstFormat<14>;
-def InstFormatCJ : InstFormat<15>;
-def InstFormatOther : InstFormat<16>;
+def InstFormatCA : InstFormat<14>;
+def InstFormatCB : InstFormat<15>;
+def InstFormatCJ : InstFormat<16>;
+def InstFormatOther : InstFormat<17>;
-// The following opcode names and match those given in Table 19.1 in the
+// The following opcode names match those given in Table 19.1 in the
// RISC-V User-level ISA specification ("RISC-V base opcode map").
class RISCVOpcode<bits<7> val> {
bits<7> Value = val;
diff --git a/lib/Target/RISCV/RISCVInstrFormatsC.td b/lib/Target/RISCV/RISCVInstrFormatsC.td
index 6abcbd7cc8a1..bda8bbb558eb 100644
--- a/lib/Target/RISCV/RISCVInstrFormatsC.td
+++ b/lib/Target/RISCV/RISCVInstrFormatsC.td
@@ -118,6 +118,19 @@ class RVInst16CS<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
let Inst{1-0} = opcode;
}
+class RVInst16CA<bits<6> funct6, bits<2> funct2, bits<2> opcode, dag outs,
+ dag ins, string opcodestr, string argstr>
+ : RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCA> {
+ bits<3> rs2;
+ bits<3> rs1;
+
+ let Inst{15-10} = funct6;
+ let Inst{9-7} = rs1;
+ let Inst{6-5} = funct2;
+ let Inst{4-2} = rs2;
+ let Inst{1-0} = opcode;
+}
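
One way to sanity-check the CA field layout is to pack a known encoding by hand. The following standalone sketch (the helper and operand values are illustrative; funct6/funct2/opcode are taken from the C_SUB definition later in this patch) reproduces the 0x8c05 encoding of c.sub s0, s1.

  #include <cassert>
  #include <cstdint>

  // Packs the RVInst16CA fields exactly as the Inst{...} assignments above.
  static uint16_t encodeCA(uint16_t Funct6, uint16_t Rs1, uint16_t Funct2,
                           uint16_t Rs2, uint16_t Opcode) {
    return (Funct6 << 10) | (Rs1 << 7) | (Funct2 << 5) | (Rs2 << 2) | Opcode;
  }

  int main() {
    // c.sub s0, s1: rd'/rs1' = x8 -> 0b000, rs2' = x9 -> 0b001.
    assert(encodeCA(0b100011, 0b000, 0b00, 0b001, 0b01) == 0x8c05);
    return 0;
  }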
+
class RVInst16CB<bits<3> funct3, bits<2> opcode, dag outs, dag ins,
string opcodestr, string argstr>
: RVInst16<outs, ins, opcodestr, argstr, [], InstFormatCB> {
diff --git a/lib/Target/RISCV/RISCVInstrInfo.cpp b/lib/Target/RISCV/RISCVInstrInfo.cpp
index 327e4a7d615f..76c74368ca11 100644
--- a/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -362,9 +362,8 @@ unsigned RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
MachineFunction *MF = MBB.getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
const auto &TM = static_cast<const RISCVTargetMachine &>(MF->getTarget());
- const auto &STI = MF->getSubtarget<RISCVSubtarget>();
- if (TM.isPositionIndependent() || STI.is64Bit())
+ if (TM.isPositionIndependent())
report_fatal_error("Unable to insert indirect branch");
if (!isInt<32>(BrOffset))
diff --git a/lib/Target/RISCV/RISCVInstrInfo.td b/lib/Target/RISCV/RISCVInstrInfo.td
index b51e4e70330d..d7cc13d4fabd 100644
--- a/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/lib/Target/RISCV/RISCVInstrInfo.td
@@ -125,11 +125,6 @@ def simm12 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<12>(Imm);}]> {
}];
}
-def uimm12 : Operand<XLenVT> {
- let ParserMatchClass = UImmAsmOperand<12>;
- let DecoderMethod = "decodeUImmOperand<12>";
-}
-
// A 13-bit signed immediate where the least significant bit is zero.
def simm13_lsb0 : Operand<OtherVT> {
let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
@@ -143,8 +138,7 @@ def simm13_lsb0 : Operand<OtherVT> {
}];
}
-def uimm20 : Operand<XLenVT> {
- let ParserMatchClass = UImmAsmOperand<20>;
+class UImm20Operand : Operand<XLenVT> {
let EncoderMethod = "getImmOpValue";
let DecoderMethod = "decodeUImmOperand<20>";
let MCOperandPredicate = [{
@@ -155,9 +149,20 @@ def uimm20 : Operand<XLenVT> {
}];
}
+def uimm20_lui : UImm20Operand {
+ let ParserMatchClass = UImmAsmOperand<20, "LUI">;
+}
+def uimm20_auipc : UImm20Operand {
+ let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
+}
+
+def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
+ let ParserMethod = "parseJALOffset";
+}
+
// A 21-bit signed immediate where the least significant bit is zero.
-def simm21_lsb0 : Operand<OtherVT> {
- let ParserMatchClass = SImmAsmOperand<21, "Lsb0">;
+def simm21_lsb0_jal : Operand<OtherVT> {
+ let ParserMatchClass = Simm21Lsb0JALAsmOperand;
let EncoderMethod = "getImmOpValueAsr1";
let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
let MCOperandPredicate = [{
@@ -172,24 +177,42 @@ def BareSymbol : AsmOperandClass {
let Name = "BareSymbol";
let RenderMethod = "addImmOperands";
let DiagnosticType = "InvalidBareSymbol";
+ let ParserMethod = "parseBareSymbol";
}
// A bare symbol.
def bare_symbol : Operand<XLenVT> {
let ParserMatchClass = BareSymbol;
- let MCOperandPredicate = [{
- return MCOp.isBareSymbolRef();
- }];
+}
+
+def CSRSystemRegister : AsmOperandClass {
+ let Name = "CSRSystemRegister";
+ let ParserMethod = "parseCSRSystemRegister";
+ let DiagnosticType = "InvalidCSRSystemRegister";
+}
+
+def csr_sysreg : Operand<XLenVT> {
+ let ParserMatchClass = CSRSystemRegister;
+ let PrintMethod = "printCSRSystemRegister";
+ let DecoderMethod = "decodeUImmOperand<12>";
}
// A parameterized register class alternative to i32imm/i64imm from Target.td.
-def ixlenimm : Operand<XLenVT> {
- let ParserMatchClass = ImmXLenAsmOperand<"">;
+def ixlenimm : Operand<XLenVT>;
+
+def ixlenimm_li : Operand<XLenVT> {
+ let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
}
// Standalone (codegen-only) immleaf patterns.
def simm32 : ImmLeaf<XLenVT, [{return isInt<32>(Imm);}]>;
def simm32hi20 : ImmLeaf<XLenVT, [{return isShiftedInt<20, 12>(Imm);}]>;
+// A mask value that won't affect significant shift bits.
+def immbottomxlenset : ImmLeaf<XLenVT, [{
+ if (Subtarget->is64Bit())
+ return countTrailingOnes<uint64_t>(Imm) >= 6;
+ return countTrailingOnes<uint64_t>(Imm) >= 5;
+}]>;
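
The predicate above accepts any mask whose trailing ones cover every shift-amount bit the hardware reads. A standalone illustration (with a local reimplementation of countTrailingOnes, so it compiles outside LLVM):

  #include <cassert>
  #include <cstdint>

  // Local stand-in for llvm::countTrailingOnes.
  static unsigned countTrailingOnes(uint64_t V) {
    unsigned N = 0;
    for (; V & 1; V >>= 1)
      ++N;
    return N;
  }

  int main() {
    // RV32 shifts read only shamt[4:0], so >= 5 trailing ones is enough.
    assert(countTrailingOnes(0x1f) >= 5);  // and $count, 31 -> removable
    assert(countTrailingOnes(0xff) >= 5);  // wider masks qualify too
    assert(countTrailingOnes(0x1e) < 5);   // clears bit 0 -> must be kept
    return 0;
  }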
// Addressing modes.
// Necessary because a frameindex can't be matched directly in a pattern.
@@ -255,13 +278,13 @@ class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class CSR_ir<bits<3> funct3, string opcodestr>
- : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins uimm12:$imm12, GPR:$rs1),
+ : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
opcodestr, "$rd, $imm12, $rs1">;
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class CSR_ii<bits<3> funct3, string opcodestr>
: RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
- (ins uimm12:$imm12, uimm5:$rs1),
+ (ins csr_sysreg:$imm12, uimm5:$rs1),
opcodestr, "$rd, $imm12, $rs1">;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
@@ -285,14 +308,14 @@ class Priv<string opcodestr, bits<7> funct7>
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, isReMaterializable = 1, mayLoad = 0, mayStore = 0 in {
-def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20:$imm20),
+def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
"lui", "$rd, $imm20">;
-def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20:$imm20),
+def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
"auipc", "$rd, $imm20">;
let isCall = 1 in
-def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0:$imm20),
+def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
"jal", "$rd, $imm20">;
let isCall = 1 in
@@ -379,6 +402,15 @@ def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", ""> {
let rd = 0;
let imm12 = 1;
}
+
+// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
+// instruction (i.e., it should always trap, if your implementation has invalid
+// instruction traps).
+def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", ""> {
+ let rs1 = 0;
+ let rd = 0;
+ let imm12 = 0b110000000000;
+}
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
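
If I unpack the I-type layout correctly, the UNIMP definition above encodes as 0xc0001073, i.e. csrrw x0, cycle, x0, which traps because cycle is read-only. A small check of that reading (field positions assumed from the standard I-type format):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t Imm12 = 0b110000000000; // 0xc00: the read-only cycle CSR
    uint32_t Funct3 = 0b001;         // CSRRW
    uint32_t OpcSystem = 0b1110011;  // OPC_SYSTEM
    uint32_t Inst =
        (Imm12 << 20) | /*rs1=0*/ (Funct3 << 12) | /*rd=0*/ OpcSystem;
    assert(Inst == 0xc0001073); // the value binutils emits for "unimp"
    return 0;
  }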
def CSRRW : CSR_ir<0b001, "csrrw">;
@@ -467,7 +499,7 @@ def : InstAlias<"nop", (ADDI X0, X0, 0)>;
// expanded to real instructions immediately.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
isCodeGenOnly = 0, isAsmParserOnly = 1 in
-def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm:$imm), [],
+def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
"li", "$rd, $imm">;
def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
@@ -516,8 +548,8 @@ def : InstAlias<"bleu $rs, $rt, $offset",
(BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
// "ret" has more weight since "ret" and "jr" alias the same "jalr" instruction.
-def : InstAlias<"j $offset", (JAL X0, simm21_lsb0:$offset)>;
-def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0:$offset)>;
+def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>;
+def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>;
def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0)>;
def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0)>;
def : InstAlias<"ret", (JALR X0, X1, 0), 2>;
@@ -538,18 +570,67 @@ def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, 0xC80, X0)>;
def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, 0xC81, X0)>;
} // Predicates = [IsRV32]
-def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, uimm12:$csr, X0)>;
-def : InstAlias<"csrw $csr, $rs", (CSRRW X0, uimm12:$csr, GPR:$rs)>;
-def : InstAlias<"csrs $csr, $rs", (CSRRS X0, uimm12:$csr, GPR:$rs)>;
-def : InstAlias<"csrc $csr, $rs", (CSRRC X0, uimm12:$csr, GPR:$rs)>;
+def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>;
+def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>;
+def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>;
+def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>;
-def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, uimm12:$csr, uimm5:$imm)>;
-def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, uimm12:$csr, uimm5:$imm)>;
-def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, uimm12:$csr, uimm5:$imm)>;
+def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
+
+let EmitPriority = 0 in {
+def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
+
+def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
+def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
+}
def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>;
def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;
+let EmitPriority = 0 in {
+def : InstAlias<"add $rd, $rs1, $imm12",
+ (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"and $rd, $rs1, $imm12",
+ (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"xor $rd, $rs1, $imm12",
+ (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"or $rd, $rs1, $imm12",
+ (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"sll $rd, $rs1, $shamt",
+ (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : InstAlias<"srl $rd, $rs1, $shamt",
+ (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
+def : InstAlias<"sra $rd, $rs1, $shamt",
+ (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
+let Predicates = [IsRV64] in {
+def : InstAlias<"addw $rd, $rs1, $imm12",
+ (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"sllw $rd, $rs1, $shamt",
+ (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
+def : InstAlias<"srlw $rd, $rs1, $shamt",
+ (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
+def : InstAlias<"sraw $rd, $rs1, $shamt",
+ (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
+} // Predicates = [IsRV64]
+def : InstAlias<"slt $rd, $rs1, $imm12",
+ (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+def : InstAlias<"sltu $rd, $rs1, $imm12",
+ (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
+}
+
+def : MnemonicAlias<"move", "mv">;
+
+// The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
+// version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
+// the old name for backwards compatibility.
+def : MnemonicAlias<"scall", "ecall">;
+def : MnemonicAlias<"sbreak", "ebreak">;
+
//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//
@@ -560,7 +641,7 @@ def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;
/// Generic pattern classes
-class PatGprGpr<SDPatternOperator OpNode, RVInstR Inst>
+class PatGprGpr<SDPatternOperator OpNode, RVInst Inst>
: Pat<(OpNode GPR:$rs1, GPR:$rs2), (Inst GPR:$rs1, GPR:$rs2)>;
class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
: Pat<(OpNode GPR:$rs1, simm12:$imm12), (Inst GPR:$rs1, simm12:$imm12)>;
@@ -573,12 +654,37 @@ class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
def IsOrAdd: PatFrag<(ops node:$A, node:$B), (or node:$A, node:$B), [{
return isOrEquivalentToAdd(N);
}]>;
+def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def sexti32 : PatFrags<(ops node:$src),
+ [(sext_inreg node:$src, i32),
+ (assertsexti32 node:$src)]>;
+def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
+}]>;
+def assertzexti5 : PatFrag<(ops node:$src), (assertzext node:$src), [{
+ return cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits() <= 5;
+}]>;
+def zexti32 : PatFrags<(ops node:$src),
+ [(and node:$src, 0xffffffff),
+ (assertzexti32 node:$src)]>;
+// Defines a legal mask for (assertzexti5 (and src, mask)) to be combinable
+// with a shiftw operation. The mask mustn't modify the lower 5 bits or the
+// upper 32 bits.
+def shiftwamt_mask : ImmLeaf<XLenVT, [{
+ return countTrailingOnes<uint64_t>(Imm) >= 5 && isUInt<32>(Imm);
+}]>;
+def shiftwamt : PatFrags<(ops node:$src),
+ [(assertzexti5 (and node:$src, shiftwamt_mask)),
+ (assertzexti5 node:$src)]>;
/// Immediates
def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
def : Pat<(simm32hi20:$imm), (LUI (HI20 imm:$imm))>;
-def : Pat<(simm32:$imm), (ADDI (LUI (HI20 imm:$imm)), (LO12Sext imm:$imm))>;
+def : Pat<(simm32:$imm), (ADDI (LUI (HI20 imm:$imm)), (LO12Sext imm:$imm))>,
+ Requires<[IsRV32]>;
/// Simple arithmetic operations
@@ -591,13 +697,23 @@ def : PatGprGpr<and, AND>;
def : PatGprSimm12<and, ANDI>;
def : PatGprGpr<xor, XOR>;
def : PatGprSimm12<xor, XORI>;
-def : PatGprGpr<shl, SLL>;
def : PatGprUimmLog2XLen<shl, SLLI>;
-def : PatGprGpr<srl, SRL>;
def : PatGprUimmLog2XLen<srl, SRLI>;
-def : PatGprGpr<sra, SRA>;
def : PatGprUimmLog2XLen<sra, SRAI>;
+// Match both a plain shift and one where the shift amount is masked (this is
+// typically introduced when the legalizer promotes the shift amount and
+// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
+// ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I).
+class shiftop<SDPatternOperator operator>
+ : PatFrags<(ops node:$val, node:$count),
+ [(operator node:$val, node:$count),
+ (operator node:$val, (and node:$count, immbottomxlenset))]>;
+
+def : PatGprGpr<shiftop<shl>, SLL>;
+def : PatGprGpr<shiftop<srl>, SRL>;
+def : PatGprGpr<shiftop<sra>, SRA>;
+
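The shiftop comment above can be checked with ordinary C++ arithmetic: because the instruction itself only reads shamt[4:0] on RV32, any legalizer-inserted mask with at least five trailing ones is a no-op. A hedged sketch:

  #include <cassert>
  #include <cstdint>

  // Models RV32 SLL, which reads only the low 5 bits of the shift amount.
  static uint32_t sllRV32(uint32_t Val, uint32_t Shamt) {
    return Val << (Shamt & 31);
  }

  int main() {
    for (uint32_t C = 0; C < 64; ++C)
      // The (and node:$count, immbottomxlenset) form computes the same value,
      // so the pattern may drop the mask and select a bare SLL.
      assert(sllRV32(0x1234, C & 31) == sllRV32(0x1234, C));
    return 0;
  }
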
/// FrameIndex calculations
def : Pat<(add (i32 AddrFI:$Rs), simm12:$imm12),
@@ -614,7 +730,9 @@ def : PatGprSimm12<setult, SLTIU>;
// Define pattern expansions for setcc operations that aren't directly
// handled by a RISC-V instruction.
+def : Pat<(seteq GPR:$rs1, 0), (SLTIU GPR:$rs1, 1)>;
def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setne GPR:$rs1, 0), (SLTU X0, GPR:$rs1)>;
def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
@@ -638,7 +756,7 @@ def Select_GPR_Using_CC_GPR : SelectCC_rrirr<GPR, GPR>;
// Match `(brcond (CondOp ..), ..)` and lower to the appropriate RISC-V branch
// instruction.
class BccPat<PatFrag CondOp, RVInstB Inst>
- : Pat<(brcond (i32 (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
+ : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
(Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;
def : BccPat<seteq, BEQ>;
@@ -649,7 +767,7 @@ def : BccPat<setult, BLTU>;
def : BccPat<setuge, BGEU>;
class BccSwapPat<PatFrag CondOp, RVInst InstBcc>
- : Pat<(brcond (i32 (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
+ : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
(InstBcc GPR:$rs2, GPR:$rs1, bb:$imm12)>;
// Condition codes that don't have matching RISC-V branch instructions, but
@@ -664,8 +782,8 @@ def : BccSwapPat<setule, BGEU>;
def : Pat<(brcond GPR:$cond, bb:$imm12), (BNE GPR:$cond, X0, bb:$imm12)>;
let isBarrier = 1, isBranch = 1, isTerminator = 1 in
-def PseudoBR : Pseudo<(outs), (ins simm21_lsb0:$imm20), [(br bb:$imm20)]>,
- PseudoInstExpansion<(JAL X0, simm21_lsb0:$imm20)>;
+def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
+ PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;
let isCall = 1, Defs=[X1] in
let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
@@ -720,6 +838,11 @@ def : Pat<(Tail (iPTR tglobaladdr:$dst)),
def : Pat<(Tail (iPTR texternalsym:$dst)),
(PseudoTAIL texternalsym:$dst)>;
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
+ isAsmParserOnly = 1 in
+def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
+ "lla", "$dst, $src">;
+
/// Loads
multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
@@ -737,7 +860,7 @@ defm : LdPat<sextloadi8, LB>;
defm : LdPat<extloadi8, LB>;
defm : LdPat<sextloadi16, LH>;
defm : LdPat<extloadi16, LH>;
-defm : LdPat<load, LW>;
+defm : LdPat<load, LW>, Requires<[IsRV32]>;
defm : LdPat<zextloadi8, LBU>;
defm : LdPat<zextloadi16, LHU>;
@@ -756,7 +879,7 @@ multiclass StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
defm : StPat<truncstorei8, SB, GPR>;
defm : StPat<truncstorei16, SH, GPR>;
-defm : StPat<store, SW, GPR>;
+defm : StPat<store, SW, GPR>, Requires<[IsRV32]>;
/// Fences
@@ -764,13 +887,13 @@ defm : StPat<store, SW, GPR>;
// Manual: Volume I.
// fence acquire -> fence r, rw
-def : Pat<(atomic_fence (i32 4), (imm)), (FENCE 0b10, 0b11)>;
+def : Pat<(atomic_fence (XLenVT 4), (imm)), (FENCE 0b10, 0b11)>;
// fence release -> fence rw, w
-def : Pat<(atomic_fence (i32 5), (imm)), (FENCE 0b11, 0b1)>;
+def : Pat<(atomic_fence (XLenVT 5), (imm)), (FENCE 0b11, 0b1)>;
// fence acq_rel -> fence.tso
-def : Pat<(atomic_fence (i32 6), (imm)), (FENCE_TSO)>;
+def : Pat<(atomic_fence (XLenVT 6), (imm)), (FENCE_TSO)>;
// fence seq_cst -> fence rw, rw
-def : Pat<(atomic_fence (i32 7), (imm)), (FENCE 0b11, 0b11)>;
+def : Pat<(atomic_fence (XLenVT 7), (imm)), (FENCE 0b11, 0b11)>;
// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
// Although these are lowered to fence+load/store instructions defined in the
@@ -788,6 +911,66 @@ def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
[(CallSeqEnd timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]
+/// RV64 patterns
+
+let Predicates = [IsRV64] in {
+
+/// sext and zext
+
+def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;
+def : Pat<(and GPR:$rs1, 0xffffffff), (SRLI (SLLI GPR:$rs1, 32), 32)>;
+
+/// ALU operations
+
+def : Pat<(sext_inreg (add GPR:$rs1, GPR:$rs2), i32),
+ (ADDW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (add GPR:$rs1, simm12:$imm12), i32),
+ (ADDIW GPR:$rs1, simm12:$imm12)>;
+def : Pat<(sext_inreg (sub GPR:$rs1, GPR:$rs2), i32),
+ (SUBW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
+ (SLLIW GPR:$rs1, uimm5:$shamt)>;
+// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
+// need to undo manipulation of the mask value performed by DAGCombine.
+def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
+ (SRAIW GPR:$rs1, uimm5:$shamt)>;
+
+// For variable-length shifts, we rely on assertzexti5 being inserted during
+// lowering (see RISCVTargetLowering::PerformDAGCombine). This enables us to
+// guarantee that selecting a 32-bit variable shift is legal (as the variable
+// shift is known to be <= 32). We must also be careful not to create
+// semantically incorrect patterns. For instance, selecting SRLW for
+// (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)),
+// is not guaranteed to be safe, as we don't know whether the upper 32-bits of
+// the result are used or not (in the case where rs2=0, this is a
+// sign-extension operation).
+
+def : Pat<(sext_inreg (shl GPR:$rs1, (shiftwamt GPR:$rs2)), i32),
+ (SLLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (shl GPR:$rs1, (shiftwamt GPR:$rs2))),
+ (SRLI (SLLI (SLLW GPR:$rs1, GPR:$rs2), 32), 32)>;
+
+def : Pat<(sext_inreg (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2)), i32),
+ (SRLW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (srl (zexti32 GPR:$rs1), (shiftwamt GPR:$rs2))),
+ (SRLI (SLLI (SRLW GPR:$rs1, GPR:$rs2), 32), 32)>;
+
+def : Pat<(sra (sexti32 GPR:$rs1), (shiftwamt GPR:$rs2)),
+ (SRAW GPR:$rs1, GPR:$rs2)>;
+
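The rs2 = 0 hazard called out in the comment above is easy to make concrete (a standalone sketch; srlw below models the RV64 instruction, which sign-extends its 32-bit result):

  #include <cassert>
  #include <cstdint>

  // Models RV64 SRLW: 32-bit logical shift right, result sign-extended.
  static int64_t srlw(int64_t Rs1, int64_t Rs2) {
    return (int64_t)(int32_t)((uint32_t)Rs1 >> (Rs2 & 31));
  }

  int main() {
    int64_t X = 0x80000000; // already zexti32: bits 63:32 are zero
    // srl of a zero-extended value by 0 must keep the upper bits zero...
    assert((uint64_t)(X >> 0) == 0x80000000u);
    // ...but SRLW sign-extends, so naively selecting it would be wrong.
    assert((uint64_t)srlw(X, 0) == 0xffffffff80000000u);
    return 0;
  }
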
+/// Loads
+
+defm : LdPat<sextloadi32, LW>;
+defm : LdPat<extloadi32, LW>;
+defm : LdPat<zextloadi32, LWU>;
+defm : LdPat<load, LD>;
+
+/// Stores
+
+defm : StPat<truncstorei32, SW, GPR>;
+defm : StPat<store, SD, GPR>;
+} // Predicates = [IsRV64]
+
//===----------------------------------------------------------------------===//
// Standard extensions
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/RISCV/RISCVInstrInfoA.td b/lib/Target/RISCV/RISCVInstrInfoA.td
index 379322060438..9cb1d2f0b627 100644
--- a/lib/Target/RISCV/RISCVInstrInfoA.td
+++ b/lib/Target/RISCV/RISCVInstrInfoA.td
@@ -44,6 +44,17 @@ multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
+multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
+ def : Pat<(StoreOp GPR:$rs1, StTy:$rs2), (Inst StTy:$rs2, GPR:$rs1, 0)>;
+ def : Pat<(StoreOp AddrFI:$rs1, StTy:$rs2), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
+ def : Pat<(StoreOp (add GPR:$rs1, simm12:$imm12), StTy:$rs2),
+ (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
+ def : Pat<(StoreOp (add AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
+ (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
+ def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
+ (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
+}
+
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
@@ -91,7 +102,177 @@ defm : LdPat<atomic_load_8, LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;
-defm : StPat<atomic_store_8, SB, GPR>;
-defm : StPat<atomic_store_16, SH, GPR>;
-defm : StPat<atomic_store_32, SW, GPR>;
-} // Predicates = [HasStdExtF]
+defm : AtomicStPat<atomic_store_8, SB, GPR>;
+defm : AtomicStPat<atomic_store_16, SH, GPR>;
+defm : AtomicStPat<atomic_store_32, SW, GPR>;
+
+/// AMOs
+
+multiclass AMOPat<string AtomicOp, string BaseInst> {
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
+ !cast<RVInst>(BaseInst)>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
+ !cast<RVInst>(BaseInst#"_AQ")>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
+ !cast<RVInst>(BaseInst#"_RL")>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
+ !cast<RVInst>(BaseInst#"_AQ_RL")>;
+ def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
+ !cast<RVInst>(BaseInst#"_AQ_RL")>;
+}
+
+defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
+defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
+defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
+defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
+defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
+defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
+defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
+defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
+defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
+
+def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
+ (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
+ (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
+ (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
+ (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
+ (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
+
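The subtract patterns above lean on the two's-complement identity x - y == x + (0 - y), so AMOADD_W with a negated increment (the SUB X0, $incr operand) gives atomic subtract without a dedicated AMO. A trivial check of the identity:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t Mem = 100, Incr = 7;
    uint32_t ViaSub = Mem - Incr;
    uint32_t ViaAddNeg = Mem + (0u - Incr); // what (SUB X0, $incr) feeds in
    assert(ViaSub == ViaAddNeg && ViaSub == 93);
    return 0;
  }
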
+/// Pseudo AMOs
+
+class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+def PseudoAtomicLoadNand32 : PseudoAMO;
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
+def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
+def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
+def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
+def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
+ (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
+
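For reference, the integers hardcoded above correspond to llvm::AtomicOrdering as I understand it (see llvm/Support/AtomicOrdering.h; restated here as an assumption, with value 3 deliberately unused):

  // Mirror of the enum the 2/4/5/6/7 operands above must stay in sync with.
  enum class AtomicOrdering : unsigned {
    NotAtomic = 0,
    Unordered = 1,
    Monotonic = 2,             // _monotonic patterns pass 2
    // 3 is intentionally skipped (reserved for consume).
    Acquire = 4,               // _acquire -> 4
    Release = 5,               // _release -> 5
    AcquireRelease = 6,        // _acq_rel -> 6
    SequentiallyConsistent = 7 // _seq_cst -> 7
  };
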
+class PseudoMaskedAMO
+ : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+class PseudoMaskedAMOMinMax
+ : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
+ (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
+ ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
+ "@earlyclobber $scratch2";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+class PseudoMaskedAMOUMinUMax
+ : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
+ (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
+ "@earlyclobber $scratch2";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
+ : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering),
+ (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering)>;
+
+class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
+ : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
+ imm:$ordering),
+ (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
+ imm:$ordering)>;
+
+def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
+ PseudoMaskedAtomicSwap32>;
+def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
+ PseudoMaskedAtomicLoadAdd32>;
+def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
+ PseudoMaskedAtomicLoadSub32>;
+def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
+ PseudoMaskedAtomicLoadNand32>;
+def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
+ PseudoMaskedAtomicLoadMax32>;
+def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
+def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
+ PseudoMaskedAtomicLoadMin32>;
+def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
+ PseudoMaskedAtomicLoadUMax32>;
+def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
+def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
+ PseudoMaskedAtomicLoadUMin32>;
+
+/// Compare and exchange
+
+class PseudoCmpXchg
+ : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$cmpval, GPR:$newval, i32imm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+// Ordering constants must be kept in sync with the AtomicOrdering enum in
+// AtomicOrdering.h.
+multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
+ def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
+ def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
+ def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
+ def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
+ def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
+ (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
+}
+
+def PseudoCmpXchg32 : PseudoCmpXchg;
+defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
+
+def PseudoMaskedCmpXchg32
+ : Pseudo<(outs GPR:$res, GPR:$scratch),
+ (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
+ i32imm:$ordering), []> {
+ let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+}
+
+def : Pat<(int_riscv_masked_cmpxchg_i32
+ GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
+ (PseudoMaskedCmpXchg32
+ GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
+
+} // Predicates = [HasStdExtA]
diff --git a/lib/Target/RISCV/RISCVInstrInfoC.td b/lib/Target/RISCV/RISCVInstrInfoC.td
index 5d1c62c0b653..ad68b5a7dc97 100644
--- a/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -167,7 +167,7 @@ def uimm10_lsb00nonzero : Operand<XLenVT>,
[{return isShiftedUInt<8, 2>(Imm) && (Imm != 0);}]> {
let ParserMatchClass = UImmAsmOperand<10, "Lsb00NonZero">;
let EncoderMethod = "getImmOpValue";
- let DecoderMethod = "decodeUImmOperand<10>";
+ let DecoderMethod = "decodeUImmNonZeroOperand<10>";
let MCOperandPredicate = [{
int64_t Imm;
if (!MCOp.evaluateAsConstantImm(Imm))
@@ -182,12 +182,12 @@ def simm10_lsb0000nonzero : Operand<XLenVT>,
[{return (Imm != 0) && isShiftedInt<6, 4>(Imm);}]> {
let ParserMatchClass = SImmAsmOperand<10, "Lsb0000NonZero">;
let EncoderMethod = "getImmOpValue";
- let DecoderMethod = "decodeSImmOperand<10>";
+ let DecoderMethod = "decodeSImmNonZeroOperand<10>";
let MCOperandPredicate = [{
int64_t Imm;
if (!MCOp.evaluateAsConstantImm(Imm))
return false;
- return isShiftedInt<6, 4>(Imm);
+ return isShiftedInt<6, 4>(Imm) && (Imm != 0);
}];
}
@@ -258,16 +258,13 @@ class Shift_right<bits<2> funct2, string OpcodeStr, RegisterClass cls,
}
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-class CS_ALU<bits<2> funct2, string OpcodeStr, RegisterClass cls,
- bit RV64only>
- : RVInst16CS<0b100, 0b01, (outs cls:$rd_wb), (ins cls:$rd, cls:$rs2),
+class CS_ALU<bits<6> funct6, bits<2> funct2, string OpcodeStr,
+ RegisterClass cls>
+ : RVInst16CA<funct6, funct2, 0b01, (outs cls:$rd_wb), (ins cls:$rd, cls:$rs2),
OpcodeStr, "$rd, $rs2"> {
bits<3> rd;
let Constraints = "$rd = $rd_wb";
- let Inst{12} = RV64only;
- let Inst{11-10} = 0b11;
let Inst{9-7} = rd;
- let Inst{6-5} = funct2;
}
//===----------------------------------------------------------------------===//
@@ -411,14 +408,14 @@ def C_ANDI : RVInst16CB<0b100, 0b01, (outs GPRC:$rs1_wb), (ins GPRC:$rs1, simm6:
let Inst{6-2} = imm{4-0};
}
-def C_SUB : CS_ALU<0b00, "c.sub", GPRC, 0>;
-def C_XOR : CS_ALU<0b01, "c.xor", GPRC, 0>;
-def C_OR : CS_ALU<0b10, "c.or" , GPRC, 0>;
-def C_AND : CS_ALU<0b11, "c.and", GPRC, 0>;
+def C_SUB : CS_ALU<0b100011, 0b00, "c.sub", GPRC>;
+def C_XOR : CS_ALU<0b100011, 0b01, "c.xor", GPRC>;
+def C_OR : CS_ALU<0b100011, 0b10, "c.or" , GPRC>;
+def C_AND : CS_ALU<0b100011, 0b11, "c.and", GPRC>;
let Predicates = [HasStdExtC, IsRV64] in {
-def C_SUBW : CS_ALU<0b00, "c.subw", GPRC, 1>;
-def C_ADDW : CS_ALU<0b01, "c.addw", GPRC, 1>;
+def C_SUBW : CS_ALU<0b100111, 0b00, "c.subw", GPRC>;
+def C_ADDW : CS_ALU<0b100111, 0b01, "c.addw", GPRC>;
}
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
@@ -478,7 +475,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
def C_MV : RVInst16CR<0b1000, 0b10, (outs GPRNoX0:$rs1), (ins GPRNoX0:$rs2),
"c.mv", "$rs1, $rs2">;
-let rs1 = 0, rs2 = 0, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+let rs1 = 0, rs2 = 0, hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
def C_EBREAK : RVInst16CR<0b1001, 0b10, (outs), (ins), "c.ebreak", "">;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
@@ -517,6 +514,13 @@ def C_SDSP : CStackStore<0b111, "c.sdsp", GPR, uimm9_lsb000> {
let Inst{9-7} = imm{8-6};
}
+// The all zeros pattern isn't a valid RISC-V instruction. It's used by GNU
+// binutils as a 16-bit instruction known to be unimplemented (i.e., trapping).
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
+def C_UNIMP : RVInst16<(outs), (ins), "c.unimp", "", [], InstFormatOther> {
+ let Inst{15-0} = 0;
+}
+
} // Predicates = [HasStdExtC]
//===----------------------------------------------------------------------===//
@@ -625,6 +629,8 @@ def : CompressPat<(AND GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
} // Predicates = [HasStdExtC]
let Predicates = [HasStdExtC, IsRV64] in {
+def : CompressPat<(ADDIW GPRNoX0:$rd, X0, simm6:$imm),
+ (C_LI GPRNoX0:$rd, simm6:$imm)>;
def : CompressPat<(SUBW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
(C_SUBW GPRC:$rs1, GPRC:$rs2)>;
def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
@@ -678,6 +684,7 @@ def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs2, X0),
def : CompressPat<(ADDI GPRNoX0:$rs1, GPRNoX0:$rs2, 0),
(C_MV GPRNoX0:$rs1, GPRNoX0:$rs2)>;
def : CompressPat<(EBREAK), (C_EBREAK)>;
+def : CompressPat<(UNIMP), (C_UNIMP)>;
def : CompressPat<(JALR X1, GPRNoX0:$rs1, 0),
(C_JALR GPRNoX0:$rs1)>;
def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
diff --git a/lib/Target/RISCV/RISCVInstrInfoD.td b/lib/Target/RISCV/RISCVInstrInfoD.td
index 06b834d55ade..9f1cd50de595 100644
--- a/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -212,13 +212,8 @@ let Predicates = [HasStdExtD] in {
def : Pat<(fpround FPR64:$rs1), (FCVT_S_D FPR64:$rs1, 0b111)>;
def : Pat<(fpextend FPR32:$rs1), (FCVT_D_S FPR32:$rs1)>;
-// FP->[u]int. Round-to-zero must be used
-def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
-def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;
-
-// [u]int->fp
-def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
-def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
+// [u]int<->double conversion patterns must be gated on IsRV32 or IsRV64, so
+// are defined later.
/// Float arithmetic operations
@@ -235,6 +230,22 @@ def : Pat<(fabs FPR64:$rs1), (FSGNJX_D $rs1, $rs1)>;
def : PatFpr64Fpr64<fcopysign, FSGNJ_D>;
def : Pat<(fcopysign FPR64:$rs1, (fneg FPR64:$rs2)), (FSGNJN_D $rs1, $rs2)>;
+// fmadd: rs1 * rs2 + rs3
+def : Pat<(fma FPR64:$rs1, FPR64:$rs2, FPR64:$rs3),
+ (FMADD_D $rs1, $rs2, $rs3, 0b111)>;
+
+// fmsub: rs1 * rs2 - rs3
+def : Pat<(fma FPR64:$rs1, FPR64:$rs2, (fneg FPR64:$rs3)),
+ (FMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+// fnmsub: -rs1 * rs2 + rs3
+def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, FPR64:$rs3),
+ (FNMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
+// fnmadd: -rs1 * rs2 - rs3
+def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, (fneg FPR64:$rs3)),
+ (FNMADD_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>;
+
// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
// canonical NaN when giving a signaling NaN. This doesn't match the LLVM
// behaviour (see https://bugs.llvm.org/show_bug.cgi?id=27363). However, the
@@ -287,3 +298,13 @@ def SplitF64Pseudo
[(set GPR:$dst1, GPR:$dst2, (RISCVSplitF64 FPR64:$src))]>;
} // Predicates = [HasStdExtD]
+
+let Predicates = [HasStdExtD, IsRV32] in {
+// double->[u]int. Round-to-zero must be used.
+def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;
+
+// [u]int->double.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
+} // Predicates = [HasStdExtD, IsRV32]
diff --git a/lib/Target/RISCV/RISCVInstrInfoF.td b/lib/Target/RISCV/RISCVInstrInfoF.td
index 6d7c59becf24..03bdac45873d 100644
--- a/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -27,7 +27,7 @@ def FRMArg : AsmOperandClass {
def frmarg : Operand<XLenVT> {
let ParserMatchClass = FRMArg;
let PrintMethod = "printFRMArg";
- let DecoderMethod = "decodeUImmOperand<3>";
+ let DecoderMethod = "decodeFRMArg";
}
//===----------------------------------------------------------------------===//
@@ -252,13 +252,8 @@ let Predicates = [HasStdExtF] in {
def : Pat<(bitconvert GPR:$rs1), (FMV_W_X GPR:$rs1)>;
def : Pat<(bitconvert FPR32:$rs1), (FMV_X_W FPR32:$rs1)>;
-// FP->[u]int. Round-to-zero must be used
-def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>;
-def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>;
-
-// [u]int->fp. Match GCC and default to using dynamic rounding mode.
-def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>;
-def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>;
+// [u]int32<->float conversion patterns must be gated on IsRV32 or IsRV64, so
+// are defined later.
/// Float arithmetic operations
@@ -275,6 +270,22 @@ def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>;
def : PatFpr32Fpr32<fcopysign, FSGNJ_S>;
def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>;
+// fmadd: rs1 * rs2 + rs3
+def : Pat<(fma FPR32:$rs1, FPR32:$rs2, FPR32:$rs3),
+ (FMADD_S $rs1, $rs2, $rs3, 0b111)>;
+
+// fmsub: rs1 * rs2 - rs3
+def : Pat<(fma FPR32:$rs1, FPR32:$rs2, (fneg FPR32:$rs3)),
+ (FMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
+// fnmsub: -rs1 * rs2 + rs3
+def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, FPR32:$rs3),
+ (FNMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
+// fnmadd: -rs1 * rs2 - rs3
+def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)),
+ (FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+
// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
// canonical NaN when given a signaling NaN. This doesn't match the LLVM
// behaviour (see https://bugs.llvm.org/show_bug.cgi?id=27363). However, the
@@ -313,3 +324,13 @@ defm : LdPat<load, FLW>;
defm : StPat<store, FSW, FPR32>;
} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtF, IsRV32] in {
+// float->[u]int. Round-to-zero must be used.
+def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>;
+
+// [u]int->float. Match GCC and default to using dynamic rounding mode.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>;
+} // Predicates = [HasStdExtF, IsRV32]
diff --git a/lib/Target/RISCV/RISCVInstrInfoM.td b/lib/Target/RISCV/RISCVInstrInfoM.td
index 2dd10ada4003..05dd3311ad54 100644
--- a/lib/Target/RISCV/RISCVInstrInfoM.td
+++ b/lib/Target/RISCV/RISCVInstrInfoM.td
@@ -49,3 +49,34 @@ def : PatGprGpr<udiv, DIVU>;
def : PatGprGpr<srem, REM>;
def : PatGprGpr<urem, REMU>;
} // Predicates = [HasStdExtM]
+
+let Predicates = [HasStdExtM, IsRV64] in {
+def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32),
+ (MULW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (sdiv (sexti32 GPR:$rs1),
+ (sexti32 GPR:$rs2)), i32),
+ (DIVW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(zexti32 (sdiv (sexti32 GPR:$rs1),
+ (sexti32 GPR:$rs2))),
+ (SRLI (SLLI (DIVW GPR:$rs1, GPR:$rs2), 32), 32)>;
+def : Pat<(sext_inreg (udiv (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32),
+ (DIVUW GPR:$rs1, GPR:$rs2)>;
+// It's cheaper to perform a divuw and zero-extend the result than to
+// zero-extend both inputs to a udiv.
+def : Pat<(udiv (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff)),
+ (SRLI (SLLI (DIVUW GPR:$rs1, GPR:$rs2), 32), 32)>;
+// Although the sexti32 operands may not have originated from an i32 srem,
+// this pattern is safe as it is impossible for two sign extended inputs to
+// produce a result where res[63:32]=0 and res[31]=1.
+def : Pat<(srem (sexti32 GPR:$rs1), (sexti32 GPR:$rs2)),
+ (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (srem (sexti32 GPR:$rs1),
+ (sexti32 GPR:$rs2)), i32),
+ (REMW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(sext_inreg (urem (zexti32 GPR:$rs1), (zexti32 GPR:$rs2)), i32),
+ (REMUW GPR:$rs1, GPR:$rs2)>;
+// It's cheaper to perform a remuw and zero-extend the result than to
+// zero-extend both inputs to a urem.
+def : Pat<(urem (and GPR:$rs1, 0xffffffff), (and GPR:$rs2, 0xffffffff)),
+ (SRLI (SLLI (REMUW GPR:$rs1, GPR:$rs2), 32), 32)>;
+} // Predicates = [HasStdExtM, IsRV64]
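
The "cheaper" claims above are an instruction-count argument: zero-extending both inputs to a 64-bit udiv/urem costs two SLLI+SRLI pairs plus the divide (five instructions), while DIVUW/REMUW plus one SLLI+SRLI pair is three. The semantic equivalence they rely on can be spot-checked in plain C++ (a sketch with arbitrary test values):

  #include <cassert>
  #include <cstdint>

  static uint64_t zext32(uint64_t V) { return V & 0xffffffff; }

  int main() {
    uint64_t A = 0x12345678ffffffff, B = 0xdeadbeef00000003;
    // divuw/remuw operate on the low 32 bits; the SRLI(SLLI(..., 32), 32)
    // pair in the patterns re-zero-extends their sign-extended result.
    assert(zext32((uint32_t)A / (uint32_t)B) == zext32(A) / zext32(B));
    assert(zext32((uint32_t)A % (uint32_t)B) == zext32(A) % zext32(B));
    return 0;
  }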
diff --git a/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
index b8fa8a97d41a..cea009c5447d 100644
--- a/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
+++ b/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -62,7 +62,7 @@ private:
MachineRegisterInfo *MRI;
std::set<MachineInstr *> DeadInstrs;
};
-}; // end anonymous namespace
+} // end anonymous namespace
char RISCVMergeBaseOffsetOpt::ID = 0;
INITIALIZE_PASS(RISCVMergeBaseOffsetOpt, "riscv-merge-base-offset",
diff --git a/lib/Target/RISCV/RISCVSystemOperands.td b/lib/Target/RISCV/RISCVSystemOperands.td
new file mode 100644
index 000000000000..f1b7984ffe6b
--- /dev/null
+++ b/lib/Target/RISCV/RISCVSystemOperands.td
@@ -0,0 +1,352 @@
+//===- RISCVSystemOperands.td ----------------------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the symbolic operands permitted for various kinds of
+// RISC-V system instruction.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/TableGen/SearchableTable.td"
+
+//===----------------------------------------------------------------------===//
+// CSR (control and status register read/write) instruction options.
+//===----------------------------------------------------------------------===//
+
+class SysReg<string name, bits<12> op> {
+ string Name = name;
+ bits<12> Encoding = op;
+ // FIXME: add these additional fields when needed.
+ // Privilege Access: Read and Write = 0, 1, 2; Read-Only = 3.
+ // Privilege Mode: User = 0, System = 1 or Machine = 3.
+ // bits<2> ReadWrite = op{11 - 10};
+ // bits<2> XMode = op{9 - 8};
+ // Check Extra field name and what bits 7-6 correspond to.
+ // bits<2> Extra = op{7 - 6};
+ // Register number without the privilege bits.
+ // bits<6> Number = op{5 - 0};
+ code FeaturesRequired = [{ {} }];
+ bit isRV32Only = 0;
+}
+
+def SysRegsList : GenericTable {
+ let FilterClass = "SysReg";
+ // FIXME: add "ReadWrite", "Mode", "Extra", "Number" fields when needed.
+ let Fields = [ "Name", "Encoding", "FeaturesRequired", "isRV32Only" ];
+
+ let PrimaryKey = [ "Encoding" ];
+ let PrimaryKeyName = "lookupSysRegByEncoding";
+}
+
+def lookupSysRegByName : SearchIndex {
+ let Table = SysRegsList;
+ let Key = [ "Name" ];
+}
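
For orientation, the table and index definitions above make TableGen emit lookup helpers into RISCVGenSystemOperands.inc. Below is a self-contained toy of the shape of that generated code (the names mirror PrimaryKeyName and the SearchIndex; the struct fields are abridged and the linear scan stands in for the generated binary search):

  #include <cassert>
  #include <cstring>

  struct SysReg { const char *Name; unsigned Encoding; }; // abridged fields

  static const SysReg SysRegsList[] = {
      {"ustatus", 0x000}, {"fflags", 0x001}, {"mstatus", 0x300},
  };

  static const SysReg *lookupSysRegByName(const char *Name) {
    for (const SysReg &R : SysRegsList)
      if (std::strcmp(R.Name, Name) == 0)
        return &R;
    return nullptr;
  }

  int main() {
    assert(lookupSysRegByName("mstatus")->Encoding == 0x300);
    return 0;
  }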
+
+// The following CSR encodings match those given in Tables 2.2,
+// 2.3, 2.4 and 2.5 in the RISC-V Instruction Set Manual
+// Volume II: Privileged Architecture.
+
+//===--------------------------
+// User Trap Setup
+//===--------------------------
+def : SysReg<"ustatus", 0x000>;
+def : SysReg<"uie", 0x004>;
+def : SysReg<"utvec", 0x005>;
+
+//===--------------------------
+// User Trap Handling
+//===--------------------------
+def : SysReg<"uscratch", 0x040>;
+def : SysReg<"uepc", 0x041>;
+def : SysReg<"ucause", 0x042>;
+def : SysReg<"utval", 0x043>;
+def : SysReg<"uip", 0x044>;
+
+//===--------------------------
+// User Floating-Point CSRs
+//===--------------------------
+
+let FeaturesRequired = [{ {RISCV::FeatureStdExtF} }] in {
+def : SysReg<"fflags", 0x001>;
+def : SysReg<"frm", 0x002>;
+def : SysReg<"fcsr", 0x003>;
+}
+
+//===--------------------------
+// User Counter/Timers
+//===--------------------------
+def : SysReg<"cycle", 0xC00>;
+def : SysReg<"time", 0xC01>;
+def : SysReg<"instret", 0xC02>;
+
+def : SysReg<"hpmcounter3", 0xC03>;
+def : SysReg<"hpmcounter4", 0xC04>;
+def : SysReg<"hpmcounter5", 0xC05>;
+def : SysReg<"hpmcounter6", 0xC06>;
+def : SysReg<"hpmcounter7", 0xC07>;
+def : SysReg<"hpmcounter8", 0xC08>;
+def : SysReg<"hpmcounter9", 0xC09>;
+def : SysReg<"hpmcounter10", 0xC0A>;
+def : SysReg<"hpmcounter11", 0xC0B>;
+def : SysReg<"hpmcounter12", 0xC0C>;
+def : SysReg<"hpmcounter13", 0xC0D>;
+def : SysReg<"hpmcounter14", 0xC0E>;
+def : SysReg<"hpmcounter15", 0xC0F>;
+def : SysReg<"hpmcounter16", 0xC10>;
+def : SysReg<"hpmcounter17", 0xC11>;
+def : SysReg<"hpmcounter18", 0xC12>;
+def : SysReg<"hpmcounter19", 0xC13>;
+def : SysReg<"hpmcounter20", 0xC14>;
+def : SysReg<"hpmcounter21", 0xC15>;
+def : SysReg<"hpmcounter22", 0xC16>;
+def : SysReg<"hpmcounter23", 0xC17>;
+def : SysReg<"hpmcounter24", 0xC18>;
+def : SysReg<"hpmcounter25", 0xC19>;
+def : SysReg<"hpmcounter26", 0xC1A>;
+def : SysReg<"hpmcounter27", 0xC1B>;
+def : SysReg<"hpmcounter28", 0xC1C>;
+def : SysReg<"hpmcounter29", 0xC1D>;
+def : SysReg<"hpmcounter30", 0xC1E>;
+def : SysReg<"hpmcounter31", 0xC1F>;
+
+let isRV32Only = 1 in {
+def : SysReg<"cycleh", 0xC80>;
+def : SysReg<"timeh", 0xC81>;
+def : SysReg<"instreth", 0xC82>;
+
+def : SysReg<"hpmcounter3h", 0xC83>;
+def : SysReg<"hpmcounter4h", 0xC84>;
+def : SysReg<"hpmcounter5h", 0xC85>;
+def : SysReg<"hpmcounter6h", 0xC86>;
+def : SysReg<"hpmcounter7h", 0xC87>;
+def : SysReg<"hpmcounter8h", 0xC88>;
+def : SysReg<"hpmcounter9h", 0xC89>;
+def : SysReg<"hpmcounter10h", 0xC8A>;
+def : SysReg<"hpmcounter11h", 0xC8B>;
+def : SysReg<"hpmcounter12h", 0xC8C>;
+def : SysReg<"hpmcounter13h", 0xC8D>;
+def : SysReg<"hpmcounter14h", 0xC8E>;
+def : SysReg<"hpmcounter15h", 0xC8F>;
+def : SysReg<"hpmcounter16h", 0xC90>;
+def : SysReg<"hpmcounter17h", 0xC91>;
+def : SysReg<"hpmcounter18h", 0xC92>;
+def : SysReg<"hpmcounter19h", 0xC93>;
+def : SysReg<"hpmcounter20h", 0xC94>;
+def : SysReg<"hpmcounter21h", 0xC95>;
+def : SysReg<"hpmcounter22h", 0xC96>;
+def : SysReg<"hpmcounter23h", 0xC97>;
+def : SysReg<"hpmcounter24h", 0xC98>;
+def : SysReg<"hpmcounter25h", 0xC99>;
+def : SysReg<"hpmcounter26h", 0xC9A>;
+def : SysReg<"hpmcounter27h", 0xC9B>;
+def : SysReg<"hpmcounter28h", 0xC9C>;
+def : SysReg<"hpmcounter29h", 0xC9D>;
+def : SysReg<"hpmcounter30h", 0xC9E>;
+def : SysReg<"hpmcounter31h", 0xC9F>;
+}
+
+//===--------------------------
+// Supervisor Trap Setup
+//===--------------------------
+def : SysReg<"sstatus", 0x100>;
+def : SysReg<"sedeleg", 0x102>;
+def : SysReg<"sideleg", 0x103>;
+def : SysReg<"sie", 0x104>;
+def : SysReg<"stvec", 0x105>;
+def : SysReg<"scounteren", 0x106>;
+
+//===--------------------------
+// Supervisor Trap Handling
+//===--------------------------
+def : SysReg<"sscratch", 0x140>;
+def : SysReg<"sepc", 0x141>;
+def : SysReg<"scause", 0x142>;
+def : SysReg<"stval", 0x143>;
+def : SysReg<"sip", 0x144>;
+
+//===-------------------------------------
+// Supervisor Protection and Translation
+//===-------------------------------------
+def : SysReg<"satp", 0x180>;
+
+//===-----------------------------
+// Machine Information Registers
+//===-----------------------------
+
+def : SysReg<"mvendorid", 0xF11>;
+def : SysReg<"marchid", 0xF12>;
+def : SysReg<"mimpid", 0xF13>;
+def : SysReg<"mhartid", 0xF14>;
+
+//===-----------------------------
+// Machine Trap Setup
+//===-----------------------------
+def : SysReg<"mstatus", 0x300>;
+def : SysReg<"misa", 0x301>;
+def : SysReg<"medeleg", 0x302>;
+def : SysReg<"mideleg", 0x303>;
+def : SysReg<"mie", 0x304>;
+def : SysReg<"mtvec", 0x305>;
+def : SysReg<"mcounteren", 0x306>;
+
+//===-----------------------------
+// Machine Trap Handling
+//===-----------------------------
+def : SysReg<"mscratch", 0x340>;
+def : SysReg<"mepc", 0x341>;
+def : SysReg<"mcause", 0x342>;
+def : SysReg<"mtval", 0x343>;
+def : SysReg<"mip", 0x344>;
+
+//===----------------------------------
+// Machine Protection and Translation
+//===----------------------------------
+def : SysReg<"pmpcfg0", 0x3A0>;
+def : SysReg<"pmpcfg2", 0x3A2>;
+let isRV32Only = 1 in {
+def : SysReg<"pmpcfg1", 0x3A1>;
+def : SysReg<"pmpcfg3", 0x3A3>;
+}
+
+def : SysReg<"pmpaddr0", 0x3B0>;
+def : SysReg<"pmpaddr1", 0x3B1>;
+def : SysReg<"pmpaddr2", 0x3B2>;
+def : SysReg<"pmpaddr3", 0x3B3>;
+def : SysReg<"pmpaddr4", 0x3B4>;
+def : SysReg<"pmpaddr5", 0x3B5>;
+def : SysReg<"pmpaddr6", 0x3B6>;
+def : SysReg<"pmpaddr7", 0x3B7>;
+def : SysReg<"pmpaddr8", 0x3B8>;
+def : SysReg<"pmpaddr9", 0x3B9>;
+def : SysReg<"pmpaddr10", 0x3BA>;
+def : SysReg<"pmpaddr11", 0x3BB>;
+def : SysReg<"pmpaddr12", 0x3BC>;
+def : SysReg<"pmpaddr13", 0x3BD>;
+def : SysReg<"pmpaddr14", 0x3BE>;
+def : SysReg<"pmpaddr15", 0x3BF>;
+
+//===--------------------------
+// Machine Counter and Timers
+//===--------------------------
+def : SysReg<"mcycle", 0xB00>;
+def : SysReg<"minstret", 0xB02>;
+
+def : SysReg<"mhpmcounter3", 0xB03>;
+def : SysReg<"mhpmcounter4", 0xB04>;
+def : SysReg<"mhpmcounter5", 0xB05>;
+def : SysReg<"mhpmcounter6", 0xB06>;
+def : SysReg<"mhpmcounter7", 0xB07>;
+def : SysReg<"mhpmcounter8", 0xB08>;
+def : SysReg<"mhpmcounter9", 0xB09>;
+def : SysReg<"mhpmcounter10", 0xB0A>;
+def : SysReg<"mhpmcounter11", 0xB0B>;
+def : SysReg<"mhpmcounter12", 0xB0C>;
+def : SysReg<"mhpmcounter13", 0xB0D>;
+def : SysReg<"mhpmcounter14", 0xB0E>;
+def : SysReg<"mhpmcounter15", 0xB0F>;
+def : SysReg<"mhpmcounter16", 0xB10>;
+def : SysReg<"mhpmcounter17", 0xB11>;
+def : SysReg<"mhpmcounter18", 0xB12>;
+def : SysReg<"mhpmcounter19", 0xB13>;
+def : SysReg<"mhpmcounter20", 0xB14>;
+def : SysReg<"mhpmcounter21", 0xB15>;
+def : SysReg<"mhpmcounter22", 0xB16>;
+def : SysReg<"mhpmcounter23", 0xB17>;
+def : SysReg<"mhpmcounter24", 0xB18>;
+def : SysReg<"mhpmcounter25", 0xB19>;
+def : SysReg<"mhpmcounter26", 0xB1A>;
+def : SysReg<"mhpmcounter27", 0xB1B>;
+def : SysReg<"mhpmcounter28", 0xB1C>;
+def : SysReg<"mhpmcounter29", 0xB1D>;
+def : SysReg<"mhpmcounter30", 0xB1E>;
+def : SysReg<"mhpmcounter31", 0xB1F>;
+
+let isRV32Only = 1 in {
+def : SysReg<"mcycleh", 0xB80>;
+def : SysReg<"minstreth", 0xB82>;
+
+def : SysReg<"mhpmcounter3h", 0xB83>;
+def : SysReg<"mhpmcounter4h", 0xB84>;
+def : SysReg<"mhpmcounter5h", 0xB85>;
+def : SysReg<"mhpmcounter6h", 0xB86>;
+def : SysReg<"mhpmcounter7h", 0xB87>;
+def : SysReg<"mhpmcounter8h", 0xB88>;
+def : SysReg<"mhpmcounter9h", 0xB89>;
+def : SysReg<"mhpmcounter10h", 0xB8A>;
+def : SysReg<"mhpmcounter11h", 0xB8B>;
+def : SysReg<"mhpmcounter12h", 0xB8C>;
+def : SysReg<"mhpmcounter13h", 0xB8D>;
+def : SysReg<"mhpmcounter14h", 0xB8E>;
+def : SysReg<"mhpmcounter15h", 0xB8F>;
+def : SysReg<"mhpmcounter16h", 0xB90>;
+def : SysReg<"mhpmcounter17h", 0xB91>;
+def : SysReg<"mhpmcounter18h", 0xB92>;
+def : SysReg<"mhpmcounter19h", 0xB93>;
+def : SysReg<"mhpmcounter20h", 0xB94>;
+def : SysReg<"mhpmcounter21h", 0xB95>;
+def : SysReg<"mhpmcounter22h", 0xB96>;
+def : SysReg<"mhpmcounter23h", 0xB97>;
+def : SysReg<"mhpmcounter24h", 0xB98>;
+def : SysReg<"mhpmcounter25h", 0xB99>;
+def : SysReg<"mhpmcounter26h", 0xB9A>;
+def : SysReg<"mhpmcounter27h", 0xB9B>;
+def : SysReg<"mhpmcounter28h", 0xB9C>;
+def : SysReg<"mhpmcounter29h", 0xB9D>;
+def : SysReg<"mhpmcounter30h", 0xB9E>;
+def : SysReg<"mhpmcounter31h", 0xB9F>;
+}
+
+//===--------------------------
+// Machine Counter Setup
+//===--------------------------
+def : SysReg<"mhpmevent3", 0x323>;
+def : SysReg<"mhpmevent4", 0x324>;
+def : SysReg<"mhpmevent5", 0x325>;
+def : SysReg<"mhpmevent6", 0x326>;
+def : SysReg<"mhpmevent7", 0x327>;
+def : SysReg<"mhpmevent8", 0x328>;
+def : SysReg<"mhpmevent9", 0x329>;
+def : SysReg<"mhpmevent10", 0x32A>;
+def : SysReg<"mhpmevent11", 0x32B>;
+def : SysReg<"mhpmevent12", 0x32C>;
+def : SysReg<"mhpmevent13", 0x32D>;
+def : SysReg<"mhpmevent14", 0x32E>;
+def : SysReg<"mhpmevent15", 0x32F>;
+def : SysReg<"mhpmevent16", 0x330>;
+def : SysReg<"mhpmevent17", 0x331>;
+def : SysReg<"mhpmevent18", 0x332>;
+def : SysReg<"mhpmevent19", 0x333>;
+def : SysReg<"mhpmevent20", 0x334>;
+def : SysReg<"mhpmevent21", 0x335>;
+def : SysReg<"mhpmevent22", 0x336>;
+def : SysReg<"mhpmevent23", 0x337>;
+def : SysReg<"mhpmevent24", 0x338>;
+def : SysReg<"mhpmevent25", 0x339>;
+def : SysReg<"mhpmevent26", 0x33A>;
+def : SysReg<"mhpmevent27", 0x33B>;
+def : SysReg<"mhpmevent28", 0x33C>;
+def : SysReg<"mhpmevent29", 0x33D>;
+def : SysReg<"mhpmevent30", 0x33E>;
+def : SysReg<"mhpmevent31", 0x33F>;
+
+//===-----------------------------------------------
+// Debug/Trace Registers (shared with Debug Mode)
+//===-----------------------------------------------
+def : SysReg<"tselect", 0x7A0>;
+def : SysReg<"tdata1", 0x7A1>;
+def : SysReg<"tdata2", 0x7A2>;
+def : SysReg<"tdata3", 0x7A3>;
+
+//===-----------------------------------------------
+// Debug Mode Registers
+//===-----------------------------------------------
+def : SysReg<"dcsr", 0x7B0>;
+def : SysReg<"dpc", 0x7B1>;
+def : SysReg<"dscratch", 0x7B2>;
diff --git a/lib/Target/RISCV/RISCVTargetMachine.cpp b/lib/Target/RISCV/RISCVTargetMachine.cpp
index a2ebf5bf3e6b..8937ec200bd7 100644
--- a/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -27,6 +27,8 @@ using namespace llvm;
extern "C" void LLVMInitializeRISCVTarget() {
RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
+ auto PR = PassRegistry::getPassRegistry();
+ initializeRISCVExpandPseudoPass(*PR);
}
static std::string computeDataLayout(const Triple &TT) {
@@ -45,12 +47,6 @@ static Reloc::Model getEffectiveRelocModel(const Triple &TT,
return *RM;
}
-static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) {
- if (CM)
- return *CM;
- return CodeModel::Small;
-}
-
RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
@@ -59,7 +55,7 @@ RISCVTargetMachine::RISCVTargetMachine(const Target &T, const Triple &TT,
CodeGenOpt::Level OL, bool JIT)
: LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options,
getEffectiveRelocModel(TT, RM),
- getEffectiveCodeModel(CM), OL),
+ getEffectiveCodeModel(CM, CodeModel::Small), OL),
TLOF(make_unique<RISCVELFTargetObjectFile>()),
Subtarget(TT, CPU, FS, *this) {
initAsmInfo();
@@ -78,6 +74,7 @@ public:
void addIRPasses() override;
bool addInstSelector() override;
void addPreEmitPass() override;
+ void addPreEmitPass2() override;
void addPreRegAlloc() override;
};
}
@@ -99,6 +96,13 @@ bool RISCVPassConfig::addInstSelector() {
void RISCVPassConfig::addPreEmitPass() { addPass(&BranchRelaxationPassID); }
+void RISCVPassConfig::addPreEmitPass2() {
+  // Schedule the expansion of AMOs at the last possible moment, so that no
+  // later pass can break the forward-progress requirements of the LR/SC
+  // block.
+ addPass(createRISCVExpandPseudoPass());
+}
+
void RISCVPassConfig::addPreRegAlloc() {
addPass(createRISCVMergeBaseOffsetOptPass());
}
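Note on the code-model hunk above: the target-local getEffectiveCodeModel wrapper is dropped in favour of a generic overload that takes the default model as a second argument. A minimal sketch of what that assumed generic helper does (the actual declaration lives in an LLVM CodeGen header and may differ in detail):

  // Sketch only: honour an explicitly requested code model, otherwise fall
  // back to the target's default (CodeModel::Small for RISC-V, as passed in
  // the constructor above).
  static CodeModel::Model
  getEffectiveCodeModel(Optional<CodeModel::Model> CM,
                        CodeModel::Model Default) {
    return CM ? *CM : Default;
  }

This removes one copy of the same two-line fallback from every target that previously carried its own wrapper.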
diff --git a/lib/Target/RISCV/Utils/CMakeLists.txt b/lib/Target/RISCV/Utils/CMakeLists.txt
new file mode 100644
index 000000000000..727ab4a9fd77
--- /dev/null
+++ b/lib/Target/RISCV/Utils/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_llvm_library(LLVMRISCVUtils
+ RISCVBaseInfo.cpp
+ RISCVMatInt.cpp
+ )
diff --git a/lib/Target/RISCV/Utils/LLVMBuild.txt b/lib/Target/RISCV/Utils/LLVMBuild.txt
new file mode 100644
index 000000000000..ec75b9303669
--- /dev/null
+++ b/lib/Target/RISCV/Utils/LLVMBuild.txt
@@ -0,0 +1,24 @@
+;===- ./lib/Target/RISCV/Utils/LLVMBuild.txt ----------------*- Conf -*--===;
+;
+; The LLVM Compiler Infrastructure
+;
+; This file is distributed under the University of Illinois Open Source
+; License. See LICENSE.TXT for details.
+;
+;===------------------------------------------------------------------------===;
+;
+; This is an LLVMBuild description file for the components in this subdirectory.
+;
+; For more information on the LLVMBuild system, please see:
+;
+; http://llvm.org/docs/LLVMBuild.html
+;
+;===------------------------------------------------------------------------===;
+
+[component_0]
+type = Library
+name = RISCVUtils
+parent = RISCV
+required_libraries = Support
+add_to_library_groups = RISCV
+
diff --git a/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp b/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
new file mode 100644
index 000000000000..964af1f74cec
--- /dev/null
+++ b/lib/Target/RISCV/Utils/RISCVBaseInfo.cpp
@@ -0,0 +1,9 @@
+#include "RISCVBaseInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+namespace RISCVSysReg {
+#define GET_SysRegsList_IMPL
+#include "RISCVGenSystemOperands.inc"
+} // namespace RISCVSysReg
+} // namespace llvm
diff --git a/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/lib/Target/RISCV/Utils/RISCVBaseInfo.h
index b278a2ed3903..372e0e80bbaf 100644
--- a/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/lib/Target/RISCV/Utils/RISCVBaseInfo.h
@@ -14,9 +14,10 @@
#ifndef LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H
#define LLVM_LIB_TARGET_RISCV_MCTARGETDESC_RISCVBASEINFO_H
-#include "RISCVMCTargetDesc.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/SubtargetFeature.h"
namespace llvm {
@@ -38,9 +39,10 @@ enum {
InstFormatCIW = 11,
InstFormatCL = 12,
InstFormatCS = 13,
- InstFormatCB = 14,
- InstFormatCJ = 15,
- InstFormatOther = 16,
+ InstFormatCA = 14,
+ InstFormatCB = 15,
+ InstFormatCJ = 16,
+ InstFormatOther = 17,
InstFormatMask = 31
};
@@ -104,7 +106,53 @@ inline static RoundingMode stringToRoundingMode(StringRef Str) {
.Case("dyn", RISCVFPRndMode::DYN)
.Default(RISCVFPRndMode::Invalid);
}
+
+inline static bool isValidRoundingMode(unsigned Mode) {
+ switch (Mode) {
+ default:
+ return false;
+ case RISCVFPRndMode::RNE:
+ case RISCVFPRndMode::RTZ:
+ case RISCVFPRndMode::RDN:
+ case RISCVFPRndMode::RUP:
+ case RISCVFPRndMode::RMM:
+ case RISCVFPRndMode::DYN:
+ return true;
+ }
+}
} // namespace RISCVFPRndMode
+
+namespace RISCVSysReg {
+struct SysReg {
+ const char *Name;
+ unsigned Encoding;
+ // FIXME: add these additional fields when needed.
+ // Privilege Access: Read, Write, Read-Only.
+ // unsigned ReadWrite;
+ // Privilege Mode: User, System or Machine.
+ // unsigned Mode;
+  // Field name still to be confirmed.
+ // unsigned Extra;
+ // Register number without the privilege bits.
+ // unsigned Number;
+ FeatureBitset FeaturesRequired;
+ bool isRV32Only;
+
+ bool haveRequiredFeatures(FeatureBitset ActiveFeatures) const {
+    // Reject RV32-only registers when targeting RV64.
+ if (isRV32Only && ActiveFeatures[RISCV::Feature64Bit])
+ return false;
+ // No required feature associated with the system register.
+ if (FeaturesRequired.none())
+ return true;
+ return (FeaturesRequired & ActiveFeatures) == FeaturesRequired;
+ }
+};
+
+#define GET_SysRegsList_DECL
+#include "RISCVGenSystemOperands.inc"
+} // end namespace RISCVSysReg
+
} // namespace llvm
#endif
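A hedged sketch of how this table and haveRequiredFeatures might be consumed when an assembler validates a CSR name. The names SysRegsList and NumSysRegs are assumed shapes for the GET_SysRegsList_IMPL output from RISCVGenSystemOperands.inc, not confirmed identifiers:

  #include "Utils/RISCVBaseInfo.h"
  using namespace llvm;

  // Returns the matching system register, or nullptr if the name is unknown
  // or the register is unavailable under the current feature set.
  static const RISCVSysReg::SysReg *
  lookupSysReg(StringRef Name, const FeatureBitset &ActiveFeatures) {
    // 'SysRegsList'/'NumSysRegs' are assumed to be emitted by tablegen;
    // treat them as illustrative only.
    for (unsigned I = 0; I != RISCVSysReg::NumSysRegs; ++I) {
      const RISCVSysReg::SysReg &R = RISCVSysReg::SysRegsList[I];
      if (Name == R.Name && R.haveRequiredFeatures(ActiveFeatures))
        return &R;
    }
    return nullptr;
  }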
diff --git a/lib/Target/RISCV/Utils/RISCVMatInt.cpp b/lib/Target/RISCV/Utils/RISCVMatInt.cpp
new file mode 100644
index 000000000000..3dc298246bc5
--- /dev/null
+++ b/lib/Target/RISCV/Utils/RISCVMatInt.cpp
@@ -0,0 +1,79 @@
+//===- RISCVMatInt.cpp - Immediate materialisation --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCVMatInt.h"
+#include "MCTargetDesc/RISCVMCTargetDesc.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MachineValueType.h"
+#include "llvm/Support/MathExtras.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace RISCVMatInt {
+void generateInstSeq(int64_t Val, bool Is64Bit, InstSeq &Res) {
+ if (isInt<32>(Val)) {
+    // Depending on the active bits in the immediate value v, the following
+ // instruction sequences are emitted:
+ //
+ // v == 0 : ADDI
+ // v[0,12) != 0 && v[12,32) == 0 : ADDI
+ // v[0,12) == 0 && v[12,32) != 0 : LUI
+ // v[0,32) != 0 : LUI+ADDI(W)
+ int64_t Hi20 = ((Val + 0x800) >> 12) & 0xFFFFF;
+ int64_t Lo12 = SignExtend64<12>(Val);
+
+ if (Hi20)
+ Res.push_back(Inst(RISCV::LUI, Hi20));
+
+ if (Lo12 || Hi20 == 0) {
+ unsigned AddiOpc = (Is64Bit && Hi20) ? RISCV::ADDIW : RISCV::ADDI;
+ Res.push_back(Inst(AddiOpc, Lo12));
+ }
+ return;
+ }
+
+ assert(Is64Bit && "Can't emit >32-bit imm for non-RV64 target");
+
+ // In the worst case, for a full 64-bit constant, a sequence of 8 instructions
+  // (i.e., LUI+ADDIW+SLLI+ADDI+SLLI+ADDI+SLLI+ADDI) has to be emitted. Note
+ // that the first two instructions (LUI+ADDIW) can contribute up to 32 bits
+ // while the following ADDI instructions contribute up to 12 bits each.
+ //
+  // At first glance this appears implementable by simply emitting the most
+  // significant 32 bits (LUI+ADDIW) followed by as many left shifts (SLLI)
+  // and immediate additions (ADDI) as needed. However, because ADDI performs
+  // a sign-extended addition, that approach only works if at most 11 bits of
+  // each ADDI immediate are used. Using all 12 bits of the ADDI immediates,
+  // as GAS does, requires processing the constant starting with the least
+  // significant bit.
+ //
+  // In the following, the constant is processed from LSB to MSB, but the
+  // instructions are emitted from MSB to LSB by calling generateInstSeq
+  // recursively. Each recursion step first strips the lowest 12 bits from the
+  // constant and determines the optimal shift amount, which can be greater
+  // than 12 if the constant is sparse. The shifted remainder is then processed
+  // recursively and emitted once it fits into 32 bits; the shifts and
+  // additions are emitted as the recursion unwinds.
+
+ int64_t Lo12 = SignExtend64<12>(Val);
+  // Add in unsigned arithmetic to avoid signed-overflow UB when Val is close
+  // to INT64_MAX; the sign is restored by the SignExtend64 below.
+  int64_t Hi52 = ((uint64_t)Val + 0x800ull) >> 12;
+ int ShiftAmount = 12 + findFirstSet((uint64_t)Hi52);
+ Hi52 = SignExtend64(Hi52 >> (ShiftAmount - 12), 64 - ShiftAmount);
+
+ generateInstSeq(Hi52, Is64Bit, Res);
+
+ Res.push_back(Inst(RISCV::SLLI, ShiftAmount));
+ if (Lo12)
+ Res.push_back(Inst(RISCV::ADDI, Lo12));
+}
+} // namespace RISCVMatInt
+} // namespace llvm
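Two traces through generateInstSeq, derived from the code above; the sequences record opcode/immediate pairs only, with register operands left to the consumer:

  // 32-bit case on RV64: Val = 0x12345678.
  //   Hi20 = ((0x12345678 + 0x800) >> 12) & 0xFFFFF = 0x12345 -> LUI 0x12345
  //   Lo12 = SignExtend64<12>(0x678) = 0x678, Hi20 != 0      -> ADDIW 0x678
  RISCVMatInt::InstSeq Seq1;
  RISCVMatInt::generateInstSeq(0x12345678, /*Is64Bit=*/true, Seq1);

  // 64-bit case: Val = 0x100000001 (2^32 + 1).
  //   Lo12 = 1, Hi52 = 0x100000, ShiftAmount = 12 + 20 = 32, remainder 1,
  //   so the recursion yields: ADDI 1; SLLI 32; ADDI 1.
  RISCVMatInt::InstSeq Seq2;
  RISCVMatInt::generateInstSeq(0x100000001, /*Is64Bit=*/true, Seq2);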
diff --git a/lib/Target/RISCV/Utils/RISCVMatInt.h b/lib/Target/RISCV/Utils/RISCVMatInt.h
new file mode 100644
index 000000000000..49d1d89adc7a
--- /dev/null
+++ b/lib/Target/RISCV/Utils/RISCVMatInt.h
@@ -0,0 +1,36 @@
+//===- RISCVMatInt.h - Immediate materialisation ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_RISCV_MATINT_H
+#define LLVM_LIB_TARGET_RISCV_MATINT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MachineValueType.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace RISCVMatInt {
+struct Inst {
+ unsigned Opc;
+ int64_t Imm;
+
+ Inst(unsigned Opc, int64_t Imm) : Opc(Opc), Imm(Imm) {}
+};
+using InstSeq = SmallVector<Inst, 8>;
+
+// Helper to generate an instruction sequence that will materialise the given
+// immediate value into a register. A sequence of instructions represented by
+// simple structs is produced rather than the instructions being emitted
+// directly, so that this helper can be used both from the MC layer and during
+// instruction selection.
+void generateInstSeq(int64_t Val, bool Is64Bit, InstSeq &Res);
+} // namespace RISCVMatInt
+} // namespace llvm
+#endif
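A hedged sketch of how a consumer might walk the resulting sequence, as the comment above suggests for both the MC layer and instruction selection. The emitInst helper is hypothetical, standing in for BuildMI or MCInst construction:

  // Expand an InstSeq into real instructions, threading the result register
  // through the sequence (each step reads the previous step's result).
  void emitSeq(const RISCVMatInt::InstSeq &Seq, unsigned DstReg) {
    unsigned SrcReg = RISCV::X0; // First ADDI materialises relative to zero.
    for (const RISCVMatInt::Inst &I : Seq) {
      if (I.Opc == RISCV::LUI)
        emitInst(I.Opc, DstReg, I.Imm);         // LUI takes no source register.
      else
        emitInst(I.Opc, DstReg, SrcReg, I.Imm); // ADDI/ADDIW/SLLI: rd, rs, imm.
      SrcReg = DstReg;
    }
  }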