Diffstat (limited to 'contrib/llvm/lib/Target/WebAssembly')
86 files changed, 4512 insertions, 2442 deletions
diff --git a/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp new file mode 100644 index 000000000000..2d92b93ca704 --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp @@ -0,0 +1,561 @@ +//==- WebAssemblyAsmParser.cpp - Assembler for WebAssembly -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is part of the WebAssembly Assembler. +/// +/// It contains code to translate a parsed .s file into MCInsts. +/// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" +#include "MCTargetDesc/WebAssemblyTargetStreamer.h" +#include "WebAssembly.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCParser/MCTargetAsmParser.h" +#include "llvm/MC/MCParser/MCParsedAsmOperand.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/Support/Endian.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +#define DEBUG_TYPE "wasm-asm-parser" + +namespace { + +// We store register types as SimpleValueType to retain SIMD layout +// information, but must also be able to supply them as the (unnamed) +// register enum from WebAssemblyRegisterInfo.td/.inc. +static unsigned MVTToWasmReg(MVT::SimpleValueType Type) { + switch(Type) { + case MVT::i32: return WebAssembly::I32_0; + case MVT::i64: return WebAssembly::I64_0; + case MVT::f32: return WebAssembly::F32_0; + case MVT::f64: return WebAssembly::F64_0; + case MVT::v16i8: return WebAssembly::V128_0; + case MVT::v8i16: return WebAssembly::V128_0; + case MVT::v4i32: return WebAssembly::V128_0; + case MVT::v4f32: return WebAssembly::V128_0; + default: return MVT::INVALID_SIMPLE_VALUE_TYPE; + } +} + +/// WebAssemblyOperand - Instances of this class represent the operands in a +/// parsed WASM machine instruction. +struct WebAssemblyOperand : public MCParsedAsmOperand { + enum KindTy { Token, Local, Stack, Integer, Float, Symbol } Kind; + + SMLoc StartLoc, EndLoc; + + struct TokOp { + StringRef Tok; + }; + + struct RegOp { + // This is a (virtual) local or stack register represented as 0.. + unsigned RegNo; + // In most targets, the register number also encodes the type, but for + // wasm we have to track that seperately since we have an unbounded + // number of registers. + // This has the unfortunate side effect that we supply a different value + // to the table-gen matcher at different times in the process (when it + // calls getReg() or addRegOperands(). + // TODO: While this works, it feels brittle. and would be nice to clean up. 
+ MVT::SimpleValueType Type; + }; + + struct IntOp { + int64_t Val; + }; + + struct FltOp { + double Val; + }; + + struct SymOp { + const MCExpr *Exp; + }; + + union { + struct TokOp Tok; + struct RegOp Reg; + struct IntOp Int; + struct FltOp Flt; + struct SymOp Sym; + }; + + WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, TokOp T) + : Kind(K), StartLoc(Start), EndLoc(End), Tok(T) {} + WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, RegOp R) + : Kind(K), StartLoc(Start), EndLoc(End), Reg(R) {} + WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, IntOp I) + : Kind(K), StartLoc(Start), EndLoc(End), Int(I) {} + WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, FltOp F) + : Kind(K), StartLoc(Start), EndLoc(End), Flt(F) {} + WebAssemblyOperand(KindTy K, SMLoc Start, SMLoc End, SymOp S) + : Kind(K), StartLoc(Start), EndLoc(End), Sym(S) {} + + bool isToken() const override { return Kind == Token; } + bool isImm() const override { return Kind == Integer || + Kind == Float || + Kind == Symbol; } + bool isReg() const override { return Kind == Local || Kind == Stack; } + bool isMem() const override { return false; } + + unsigned getReg() const override { + assert(isReg()); + // This is called from the tablegen matcher (MatchInstructionImpl) + // where it expects to match the type of register, see RegOp above. + return MVTToWasmReg(Reg.Type); + } + + StringRef getToken() const { + assert(isToken()); + return Tok.Tok; + } + + SMLoc getStartLoc() const override { return StartLoc; } + SMLoc getEndLoc() const override { return EndLoc; } + + void addRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + assert(isReg() && "Not a register operand!"); + // This is called from the tablegen matcher (MatchInstructionImpl) + // where it expects to output the actual register index, see RegOp above. + unsigned R = Reg.RegNo; + if (Kind == Stack) { + // A stack register is represented as a large negative number. + // See WebAssemblyRegNumbering::runOnMachineFunction and + // getWARegStackId for why this | is needed. + R |= INT32_MIN; + } + Inst.addOperand(MCOperand::createReg(R)); + } + + void addImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + if (Kind == Integer) + Inst.addOperand(MCOperand::createImm(Int.Val)); + else if (Kind == Float) + Inst.addOperand(MCOperand::createFPImm(Flt.Val)); + else if (Kind == Symbol) + Inst.addOperand(MCOperand::createExpr(Sym.Exp)); + else + llvm_unreachable("Should be immediate or symbol!"); + } + + void print(raw_ostream &OS) const override { + switch (Kind) { + case Token: + OS << "Tok:" << Tok.Tok; + break; + case Local: + OS << "Loc:" << Reg.RegNo << ":" << static_cast<int>(Reg.Type); + break; + case Stack: + OS << "Stk:" << Reg.RegNo << ":" << static_cast<int>(Reg.Type); + break; + case Integer: + OS << "Int:" << Int.Val; + break; + case Float: + OS << "Flt:" << Flt.Val; + break; + case Symbol: + OS << "Sym:" << Sym.Exp; + break; + } + } +}; + +class WebAssemblyAsmParser final : public MCTargetAsmParser { + MCAsmParser &Parser; + MCAsmLexer &Lexer; + // These are for the current function being parsed: + // These are vectors since register assignments are so far non-sparse. + // Replace by map if necessary. 
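Note: addRegOperands above marks wasm "stack" operands by OR-ing the virtual register index with INT32_MIN, so later stages can tell stack slots from locals by the sign bit. A minimal standalone sketch of that convention in plain C++ (illustrative names, not the MC API):

#include <cstdint>
#include <cstdio>

// Mirror the "stack registers are large negative numbers" convention
// used by addRegOperands: the sign bit marks a stack slot.
static uint32_t encodeStackReg(uint32_t Index) {
  return Index | 0x80000000u;               // 0x80000000 is INT32_MIN's bit pattern
}
static bool isStackReg(uint32_t Reg)        { return (Reg & 0x80000000u) != 0; }
static uint32_t stackRegIndex(uint32_t Reg) { return Reg & 0x7fffffffu; }

int main() {
  uint32_t R = encodeStackReg(3);           // e.g. $push3 / $pop3
  std::printf("stack=%d index=%u\n", isStackReg(R), stackRegIndex(R));
  // prints: stack=1 index=3
}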
+ std::vector<MVT::SimpleValueType> LocalTypes; + std::vector<MVT::SimpleValueType> StackTypes; + MCSymbol *LastLabel; + +public: + WebAssemblyAsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser, + const MCInstrInfo &mii, const MCTargetOptions &Options) + : MCTargetAsmParser(Options, sti, mii), Parser(Parser), + Lexer(Parser.getLexer()), LastLabel(nullptr) { + } + +#define GET_ASSEMBLER_HEADER +#include "WebAssemblyGenAsmMatcher.inc" + + // TODO: This is required to be implemented, but appears unused. + bool ParseRegister(unsigned &/*RegNo*/, SMLoc &/*StartLoc*/, + SMLoc &/*EndLoc*/) override { + llvm_unreachable("ParseRegister is not implemented."); + } + + bool Error(const StringRef &msg, const AsmToken &tok) { + return Parser.Error(tok.getLoc(), msg + tok.getString()); + } + + bool IsNext(AsmToken::TokenKind Kind) { + auto ok = Lexer.is(Kind); + if (ok) Parser.Lex(); + return ok; + } + + bool Expect(AsmToken::TokenKind Kind, const char *KindName) { + if (!IsNext(Kind)) + return Error(std::string("Expected ") + KindName + ", instead got: ", + Lexer.getTok()); + return false; + } + + MVT::SimpleValueType ParseRegType(const StringRef &RegType) { + // Derive type from .param .local decls, or the instruction itself. + return StringSwitch<MVT::SimpleValueType>(RegType) + .Case("i32", MVT::i32) + .Case("i64", MVT::i64) + .Case("f32", MVT::f32) + .Case("f64", MVT::f64) + .Case("i8x16", MVT::v16i8) + .Case("i16x8", MVT::v8i16) + .Case("i32x4", MVT::v4i32) + .Case("f32x4", MVT::v4f32) + .Default(MVT::INVALID_SIMPLE_VALUE_TYPE); + } + + MVT::SimpleValueType &GetType( + std::vector<MVT::SimpleValueType> &Types, size_t i) { + Types.resize(std::max(i + 1, Types.size()), MVT::INVALID_SIMPLE_VALUE_TYPE); + return Types[i]; + } + + bool ParseReg(OperandVector &Operands, StringRef TypePrefix) { + if (Lexer.is(AsmToken::Integer)) { + auto &Local = Lexer.getTok(); + // This is a reference to a local, turn it into a virtual register. + auto LocalNo = static_cast<unsigned>(Local.getIntVal()); + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Local, Local.getLoc(), + Local.getEndLoc(), + WebAssemblyOperand::RegOp{LocalNo, + GetType(LocalTypes, LocalNo)})); + Parser.Lex(); + } else if (Lexer.is(AsmToken::Identifier)) { + auto &StackRegTok = Lexer.getTok(); + // These are push/pop/drop pseudo stack registers, which we turn + // into virtual registers also. The stackify pass will later turn them + // back into implicit stack references if possible. + auto StackReg = StackRegTok.getString(); + auto StackOp = StackReg.take_while([](char c) { return isalpha(c); }); + auto Reg = StackReg.drop_front(StackOp.size()); + unsigned long long ParsedRegNo = 0; + if (!Reg.empty() && getAsUnsignedInteger(Reg, 10, ParsedRegNo)) + return Error("Cannot parse stack register index: ", StackRegTok); + unsigned RegNo = static_cast<unsigned>(ParsedRegNo); + if (StackOp == "push") { + // This defines a result, record register type. + auto RegType = ParseRegType(TypePrefix); + GetType(StackTypes, RegNo) = RegType; + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Stack, + StackRegTok.getLoc(), + StackRegTok.getEndLoc(), + WebAssemblyOperand::RegOp{RegNo, RegType})); + } else if (StackOp == "pop") { + // This uses a previously defined stack value. 
+ auto RegType = GetType(StackTypes, RegNo); + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Stack, + StackRegTok.getLoc(), + StackRegTok.getEndLoc(), + WebAssemblyOperand::RegOp{RegNo, RegType})); + } else if (StackOp == "drop") { + // This operand will be dropped, since it is part of an instruction + // whose result is void. + } else { + return Error("Unknown stack register prefix: ", StackRegTok); + } + Parser.Lex(); + } else { + return Error( + "Expected identifier/integer following $, instead got: ", + Lexer.getTok()); + } + IsNext(AsmToken::Equal); + return false; + } + + void ParseSingleInteger(bool IsNegative, OperandVector &Operands) { + auto &Int = Lexer.getTok(); + int64_t Val = Int.getIntVal(); + if (IsNegative) Val = -Val; + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Integer, Int.getLoc(), + Int.getEndLoc(), WebAssemblyOperand::IntOp{Val})); + Parser.Lex(); + } + + bool ParseOperandStartingWithInteger(bool IsNegative, + OperandVector &Operands, + StringRef InstType) { + ParseSingleInteger(IsNegative, Operands); + if (Lexer.is(AsmToken::LParen)) { + // Parse load/store operands of the form: offset($reg)align + auto &LParen = Lexer.getTok(); + Operands.push_back( + make_unique<WebAssemblyOperand>(WebAssemblyOperand::Token, + LParen.getLoc(), + LParen.getEndLoc(), + WebAssemblyOperand::TokOp{ + LParen.getString()})); + Parser.Lex(); + if (Expect(AsmToken::Dollar, "register")) return true; + if (ParseReg(Operands, InstType)) return true; + auto &RParen = Lexer.getTok(); + Operands.push_back( + make_unique<WebAssemblyOperand>(WebAssemblyOperand::Token, + RParen.getLoc(), + RParen.getEndLoc(), + WebAssemblyOperand::TokOp{ + RParen.getString()})); + if (Expect(AsmToken::RParen, ")")) return true; + if (Lexer.is(AsmToken::Integer)) { + ParseSingleInteger(false, Operands); + } else { + // Alignment not specified. + // FIXME: correctly derive a default from the instruction. + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Integer, RParen.getLoc(), + RParen.getEndLoc(), WebAssemblyOperand::IntOp{0})); + } + } + return false; + } + + bool ParseInstruction(ParseInstructionInfo &/*Info*/, StringRef Name, + SMLoc NameLoc, OperandVector &Operands) override { + Operands.push_back( + make_unique<WebAssemblyOperand>(WebAssemblyOperand::Token, NameLoc, + SMLoc::getFromPointer( + NameLoc.getPointer() + Name.size()), + WebAssemblyOperand::TokOp{ + StringRef(NameLoc.getPointer(), + Name.size())})); + auto NamePair = Name.split('.'); + // If no '.', there is no type prefix. 
+ if (NamePair.second.empty()) std::swap(NamePair.first, NamePair.second); + while (Lexer.isNot(AsmToken::EndOfStatement)) { + auto &Tok = Lexer.getTok(); + switch (Tok.getKind()) { + case AsmToken::Dollar: { + Parser.Lex(); + if (ParseReg(Operands, NamePair.first)) return true; + break; + } + case AsmToken::Identifier: { + auto &Id = Lexer.getTok(); + const MCExpr *Val; + SMLoc End; + if (Parser.parsePrimaryExpr(Val, End)) + return Error("Cannot parse symbol: ", Lexer.getTok()); + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Symbol, Id.getLoc(), + Id.getEndLoc(), WebAssemblyOperand::SymOp{Val})); + break; + } + case AsmToken::Minus: + Parser.Lex(); + if (Lexer.isNot(AsmToken::Integer)) + return Error("Expected integer instead got: ", Lexer.getTok()); + if (ParseOperandStartingWithInteger(true, Operands, NamePair.first)) + return true; + break; + case AsmToken::Integer: + if (ParseOperandStartingWithInteger(false, Operands, NamePair.first)) + return true; + break; + case AsmToken::Real: { + double Val; + if (Tok.getString().getAsDouble(Val, false)) + return Error("Cannot parse real: ", Tok); + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Float, Tok.getLoc(), + Tok.getEndLoc(), WebAssemblyOperand::FltOp{Val})); + Parser.Lex(); + break; + } + default: + return Error("Unexpected token in operand: ", Tok); + } + if (Lexer.isNot(AsmToken::EndOfStatement)) { + if (Expect(AsmToken::Comma, ",")) return true; + } + } + Parser.Lex(); + // Call instructions are vararg, but the tablegen matcher doesn't seem to + // support that, so for now we strip these extra operands. + // This is problematic if these arguments are not simple $pop stack + // registers, since e.g. a local register would get lost, so we check for + // this. This can be the case when using -disable-wasm-explicit-locals + // which currently s2wasm requires. + // TODO: Instead, we can move this code to MatchAndEmitInstruction below and + // actually generate get_local instructions on the fly. + // Or even better, improve the matcher to support vararg? + auto IsIndirect = NamePair.second == "call_indirect"; + if (IsIndirect || NamePair.second == "call") { + // Figure out number of fixed operands from the instruction. + size_t CallOperands = 1; // The name token. + if (!IsIndirect) CallOperands++; // The function index. + if (!NamePair.first.empty()) CallOperands++; // The result register. + if (Operands.size() > CallOperands) { + // Ensure operands we drop are all $pop. + for (size_t I = CallOperands; I < Operands.size(); I++) { + auto Operand = + reinterpret_cast<WebAssemblyOperand *>(Operands[I].get()); + if (Operand->Kind != WebAssemblyOperand::Stack) + Parser.Error(NameLoc, + "Call instruction has non-stack arguments, if this code was " + "generated with -disable-wasm-explicit-locals please remove it"); + } + // Drop unneeded operands. + Operands.resize(CallOperands); + } + } + // Block instructions require a signature index, but these are missing in + // assembly, so we add a dummy one explicitly (since we have no control + // over signature tables here, we assume these will be regenerated when + // the wasm module is generated). + if (NamePair.second == "block" || NamePair.second == "loop") { + Operands.push_back(make_unique<WebAssemblyOperand>( + WebAssemblyOperand::Integer, NameLoc, + NameLoc, WebAssemblyOperand::IntOp{-1})); + } + // These don't specify the type, which has to derived from the local index. 
+ if (NamePair.second == "get_local" || NamePair.second == "tee_local") { + if (Operands.size() >= 3 && Operands[1]->isReg() && + Operands[2]->isImm()) { + auto Op1 = reinterpret_cast<WebAssemblyOperand *>(Operands[1].get()); + auto Op2 = reinterpret_cast<WebAssemblyOperand *>(Operands[2].get()); + auto Type = GetType(LocalTypes, static_cast<size_t>(Op2->Int.Val)); + Op1->Reg.Type = Type; + GetType(StackTypes, Op1->Reg.RegNo) = Type; + } + } + return false; + } + + void onLabelParsed(MCSymbol *Symbol) override { + LastLabel = Symbol; + } + + bool ParseDirective(AsmToken DirectiveID) override { + assert(DirectiveID.getKind() == AsmToken::Identifier); + auto &Out = getStreamer(); + auto &TOut = reinterpret_cast<WebAssemblyTargetStreamer &>( + *Out.getTargetStreamer()); + // TODO: we're just parsing the subset of directives we're interested in, + // and ignoring ones we don't recognise. We should ideally verify + // all directives here. + if (DirectiveID.getString() == ".type") { + // This could be the start of a function, check if followed by + // "label,@function" + if (!(IsNext(AsmToken::Identifier) && + IsNext(AsmToken::Comma) && + IsNext(AsmToken::At) && + Lexer.is(AsmToken::Identifier))) + return Error("Expected label,@type declaration, got: ", Lexer.getTok()); + if (Lexer.getTok().getString() == "function") { + // Track locals from start of function. + LocalTypes.clear(); + StackTypes.clear(); + } + Parser.Lex(); + //Out.EmitSymbolAttribute(??, MCSA_ELF_TypeFunction); + } else if (DirectiveID.getString() == ".param" || + DirectiveID.getString() == ".local") { + // Track the number of locals, needed for correct virtual register + // assignment elsewhere. + // Also output a directive to the streamer. + std::vector<MVT> Params; + std::vector<MVT> Locals; + while (Lexer.is(AsmToken::Identifier)) { + auto RegType = ParseRegType(Lexer.getTok().getString()); + if (RegType == MVT::INVALID_SIMPLE_VALUE_TYPE) return true; + LocalTypes.push_back(RegType); + if (DirectiveID.getString() == ".param") { + Params.push_back(RegType); + } else { + Locals.push_back(RegType); + } + Parser.Lex(); + if (!IsNext(AsmToken::Comma)) break; + } + assert(LastLabel); + TOut.emitParam(LastLabel, Params); + TOut.emitLocal(Locals); + } else { + // For now, ignore anydirective we don't recognize: + while (Lexer.isNot(AsmToken::EndOfStatement)) Parser.Lex(); + } + return Expect(AsmToken::EndOfStatement, "EOL"); + } + + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &/*Opcode*/, + OperandVector &Operands, + MCStreamer &Out, uint64_t &ErrorInfo, + bool MatchingInlineAsm) override { + MCInst Inst; + unsigned MatchResult = + MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm); + switch (MatchResult) { + case Match_Success: { + Out.EmitInstruction(Inst, getSTI()); + return false; + } + case Match_MissingFeature: + return Parser.Error(IDLoc, + "instruction requires a WASM feature not currently enabled"); + case Match_MnemonicFail: + return Parser.Error(IDLoc, "invalid instruction"); + case Match_NearMisses: + return Parser.Error(IDLoc, "ambiguous instruction"); + case Match_InvalidTiedOperand: + case Match_InvalidOperand: { + SMLoc ErrorLoc = IDLoc; + if (ErrorInfo != ~0ULL) { + if (ErrorInfo >= Operands.size()) + return Parser.Error(IDLoc, "too few operands for instruction"); + ErrorLoc = Operands[ErrorInfo]->getStartLoc(); + if (ErrorLoc == SMLoc()) + ErrorLoc = IDLoc; + } + return Parser.Error(ErrorLoc, "invalid operand for instruction"); + } + } + llvm_unreachable("Implement any new match types 
added!"); + } +}; +} // end anonymous namespace + +// Force static initialization. +extern "C" void LLVMInitializeWebAssemblyAsmParser() { + RegisterMCAsmParser<WebAssemblyAsmParser> X(getTheWebAssemblyTarget32()); + RegisterMCAsmParser<WebAssemblyAsmParser> Y(getTheWebAssemblyTarget64()); +} + +#define GET_REGISTER_MATCHER +#define GET_MATCHER_IMPLEMENTATION +#include "WebAssemblyGenAsmMatcher.inc" diff --git a/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp index 9be11da9afac..2f0960271e30 100644 --- a/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file is part of the WebAssembly Disassembler. +/// This file is part of the WebAssembly Disassembler. /// /// It contains code to translate the data produced by the decoder into /// MCInsts. @@ -19,16 +19,23 @@ #include "WebAssembly.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCDisassembler/MCDisassembler.h" +#include "llvm/MC/MCFixedLenDisassembler.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/Endian.h" +#include "llvm/Support/LEB128.h" #include "llvm/Support/TargetRegistry.h" + using namespace llvm; #define DEBUG_TYPE "wasm-disassembler" +using DecodeStatus = MCDisassembler::DecodeStatus; + +#include "WebAssemblyGenDisassemblerTables.inc" + namespace { class WebAssemblyDisassembler final : public MCDisassembler { std::unique_ptr<const MCInstrInfo> MCII; @@ -60,11 +67,120 @@ extern "C" void LLVMInitializeWebAssemblyDisassembler() { createWebAssemblyDisassembler); } -MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction( - MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/, - raw_ostream &OS, raw_ostream &CS) const { +static int nextByte(ArrayRef<uint8_t> Bytes, uint64_t &Size) { + if (Size >= Bytes.size()) + return -1; + auto V = Bytes[Size]; + Size++; + return V; +} - // TODO: Implement disassembly. +static bool parseLEBImmediate(MCInst &MI, uint64_t &Size, + ArrayRef<uint8_t> Bytes, bool Signed) { + unsigned N = 0; + const char *Error = nullptr; + auto Val = Signed ? 
decodeSLEB128(Bytes.data() + Size, &N, + Bytes.data() + Bytes.size(), &Error) + : static_cast<int64_t>( + decodeULEB128(Bytes.data() + Size, &N, + Bytes.data() + Bytes.size(), &Error)); + if (Error) + return false; + Size += N; + MI.addOperand(MCOperand::createImm(Val)); + return true; +} + +template <typename T> +bool parseFPImmediate(MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes) { + if (Size + sizeof(T) > Bytes.size()) + return false; + T Val; + memcpy(&Val, Bytes.data() + Size, sizeof(T)); + support::endian::byte_swap<T, support::endianness::little>(Val); + Size += sizeof(T); + MI.addOperand(MCOperand::createFPImm(static_cast<double>(Val))); + return true; +} - return MCDisassembler::Fail; +MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction( + MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/, + raw_ostream & /*OS*/, raw_ostream &CS) const { + CommentStream = &CS; + Size = 0; + auto Opc = nextByte(Bytes, Size); + if (Opc < 0) + return MCDisassembler::Fail; + const auto *WasmInst = &InstructionTable0[Opc]; + // If this is a prefix byte, indirect to another table. + if (WasmInst->ET == ET_Prefix) { + WasmInst = nullptr; + // Linear search, so far only 2 entries. + for (auto PT = PrefixTable; PT->Table; PT++) { + if (PT->Prefix == Opc) { + WasmInst = PT->Table; + break; + } + } + if (!WasmInst) + return MCDisassembler::Fail; + Opc = nextByte(Bytes, Size); + if (Opc < 0) + return MCDisassembler::Fail; + WasmInst += Opc; + } + if (WasmInst->ET == ET_Unused) + return MCDisassembler::Fail; + // At this point we must have a valid instruction to decode. + assert(WasmInst->ET == ET_Instruction); + MI.setOpcode(WasmInst->Opcode); + // Parse any operands. + for (uint8_t OPI = 0; OPI < WasmInst->NumOperands; OPI++) { + switch (WasmInst->Operands[OPI]) { + // ULEB operands: + case WebAssembly::OPERAND_BASIC_BLOCK: + case WebAssembly::OPERAND_LOCAL: + case WebAssembly::OPERAND_GLOBAL: + case WebAssembly::OPERAND_FUNCTION32: + case WebAssembly::OPERAND_OFFSET32: + case WebAssembly::OPERAND_P2ALIGN: + case WebAssembly::OPERAND_TYPEINDEX: + case MCOI::OPERAND_IMMEDIATE: { + if (!parseLEBImmediate(MI, Size, Bytes, false)) + return MCDisassembler::Fail; + break; + } + // SLEB operands: + case WebAssembly::OPERAND_I32IMM: + case WebAssembly::OPERAND_I64IMM: + case WebAssembly::OPERAND_SIGNATURE: { + if (!parseLEBImmediate(MI, Size, Bytes, true)) + return MCDisassembler::Fail; + break; + } + // FP operands. + case WebAssembly::OPERAND_F32IMM: { + if (!parseFPImmediate<float>(MI, Size, Bytes)) + return MCDisassembler::Fail; + break; + } + case WebAssembly::OPERAND_F64IMM: { + if (!parseFPImmediate<double>(MI, Size, Bytes)) + return MCDisassembler::Fail; + break; + } + case MCOI::OPERAND_REGISTER: { + // These are NOT actually in the instruction stream, but MC is going to + // expect operands to be present for them! + // FIXME: can MC re-generate register assignments or do we have to + // do this? Since this function decodes a single instruction, we don't + // have the proper context for tracking an operand stack here. 
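Note: the decoder above relies on decodeULEB128/decodeSLEB128 from llvm/Support/LEB128.h for most immediates. For reference, a self-contained sketch of the two variable-length encodings (simplified, no bounds or overflow checks; not the LLVM helpers):

#include <cstddef>
#include <cstdint>

// Unsigned LEB128: 7 payload bits per byte, continuation bit set on all
// but the last byte.
uint64_t decodeULEB(const uint8_t *P, size_t &N) {
  uint64_t Value = 0;
  unsigned Shift = 0;
  uint8_t Byte;
  N = 0;
  do {
    Byte = P[N++];
    Value |= uint64_t(Byte & 0x7f) << Shift;
    Shift += 7;
  } while (Byte & 0x80);
  return Value;
}

// Signed LEB128: same layout, plus sign extension from bit 6 of the
// final byte.
int64_t decodeSLEB(const uint8_t *P, size_t &N) {
  int64_t Value = 0;
  unsigned Shift = 0;
  uint8_t Byte;
  N = 0;
  do {
    Byte = P[N++];
    Value |= int64_t(Byte & 0x7f) << Shift;
    Shift += 7;
  } while (Byte & 0x80);
  if (Shift < 64 && (Byte & 0x40))
    Value |= -(int64_t(1) << Shift);        // sign-extend
  return Value;
}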
+ MI.addOperand(MCOperand::createReg(0)); + break; + } + default: + llvm_unreachable("Unknown operand type in WebAssemblyDisassembler"); + } + } + return MCDisassembler::Success; } diff --git a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp index c3f0f2787146..10fa798ac8d7 100644 --- a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Print MCInst instructions to wasm format. +/// Print MCInst instructions to wasm format. /// //===----------------------------------------------------------------------===// @@ -46,7 +46,7 @@ void WebAssemblyInstPrinter::printRegName(raw_ostream &OS, void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot, - const MCSubtargetInfo & /*STI*/) { + const MCSubtargetInfo &STI) { // Print the instruction (this uses the AsmStrings from the .td files). printInstruction(MI, OS); @@ -82,10 +82,12 @@ void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, ControlFlowStack.push_back(std::make_pair(ControlFlowCounter++, false)); break; case WebAssembly::END_LOOP: - ControlFlowStack.pop_back(); + // Have to guard against an empty stack, in case of mismatched pairs + // in assembly parsing. + if (!ControlFlowStack.empty()) ControlFlowStack.pop_back(); break; case WebAssembly::END_BLOCK: - printAnnotation( + if (!ControlFlowStack.empty()) printAnnotation( OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':'); break; } @@ -176,10 +178,10 @@ void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, if (Info.OperandType == WebAssembly::OPERAND_F32IMM) { // TODO: MC converts all floating point immediate operands to double. // This is fine for numeric values, but may cause NaNs to change bits. 
- O << toString(APFloat(float(Op.getFPImm()))); + O << ::toString(APFloat(float(Op.getFPImm()))); } else { assert(Info.OperandType == WebAssembly::OPERAND_F64IMM); - O << toString(APFloat(Op.getFPImm())); + O << ::toString(APFloat(Op.getFPImm())); } } else { assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() || @@ -192,20 +194,16 @@ void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, } } -void -WebAssemblyInstPrinter::printWebAssemblyP2AlignOperand(const MCInst *MI, - unsigned OpNo, - raw_ostream &O) { +void WebAssemblyInstPrinter::printWebAssemblyP2AlignOperand( + const MCInst *MI, unsigned OpNo, raw_ostream &O) { int64_t Imm = MI->getOperand(OpNo).getImm(); if (Imm == WebAssembly::GetDefaultP2Align(MI->getOpcode())) return; O << ":p2align=" << Imm; } -void -WebAssemblyInstPrinter::printWebAssemblySignatureOperand(const MCInst *MI, - unsigned OpNo, - raw_ostream &O) { +void WebAssemblyInstPrinter::printWebAssemblySignatureOperand( + const MCInst *MI, unsigned OpNo, raw_ostream &O) { int64_t Imm = MI->getOperand(OpNo).getImm(); switch (WebAssembly::ExprType(Imm)) { case WebAssembly::ExprType::Void: break; @@ -220,6 +218,7 @@ WebAssemblyInstPrinter::printWebAssemblySignatureOperand(const MCInst *MI, case WebAssembly::ExprType::B8x16: O << "b8x16"; break; case WebAssembly::ExprType::B16x8: O << "b16x8"; break; case WebAssembly::ExprType::B32x4: O << "b32x4"; break; + case WebAssembly::ExprType::ExceptRef: O << "except_ref"; break; } } @@ -238,6 +237,8 @@ const char *llvm::WebAssembly::TypeToString(MVT Ty) { case MVT::v4i32: case MVT::v4f32: return "v128"; + case MVT::ExceptRef: + return "except_ref"; default: llvm_unreachable("unsupported type"); } @@ -253,6 +254,8 @@ const char *llvm::WebAssembly::TypeToString(wasm::ValType Type) { return "f32"; case wasm::ValType::F64: return "f64"; + case wasm::ValType::EXCEPT_REF: + return "except_ref"; } llvm_unreachable("unsupported type"); } diff --git a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h index b1de84d7e8e6..f5b890a7615e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h +++ b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This class prints an WebAssembly MCInst to wasm file syntax. +/// This class prints an WebAssembly MCInst to wasm file syntax. /// //===----------------------------------------------------------------------===// @@ -17,8 +17,8 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/BinaryFormat/Wasm.h" -#include "llvm/CodeGen/MachineValueType.h" #include "llvm/MC/MCInstPrinter.h" +#include "llvm/Support/MachineValueType.h" namespace llvm { diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp index 226a3b35f2cf..244c2189b455 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblyAsmBackend class. +/// This file implements the WebAssemblyAsmBackend class. 
/// //===----------------------------------------------------------------------===// @@ -17,7 +17,6 @@ #include "llvm/MC/MCAsmBackend.h" #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCDirectives.h" -#include "llvm/MC/MCELFObjectWriter.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCFixupKindInfo.h" #include "llvm/MC/MCObjectWriter.h" @@ -26,51 +25,17 @@ #include "llvm/MC/MCWasmObjectWriter.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" + using namespace llvm; namespace { -class WebAssemblyAsmBackendELF final : public MCAsmBackend { - bool Is64Bit; - -public: - explicit WebAssemblyAsmBackendELF(bool Is64Bit) - : MCAsmBackend(), Is64Bit(Is64Bit) {} - ~WebAssemblyAsmBackendELF() override {} - - void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, - const MCValue &Target, MutableArrayRef<char> Data, - uint64_t Value, bool IsPCRel) const override; - - std::unique_ptr<MCObjectWriter> - createObjectWriter(raw_pwrite_stream &OS) const override; - - // No instruction requires relaxation - bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, - const MCRelaxableFragment *DF, - const MCAsmLayout &Layout) const override { - return false; - } - - unsigned getNumFixupKinds() const override { - // We currently just use the generic fixups in MCFixup.h and don't have any - // target-specific fixups. - return 0; - } - - bool mayNeedRelaxation(const MCInst &Inst) const override { return false; } - - void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, - MCInst &Res) const override {} - - bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override; -}; class WebAssemblyAsmBackend final : public MCAsmBackend { bool Is64Bit; public: explicit WebAssemblyAsmBackend(bool Is64Bit) - : MCAsmBackend(), Is64Bit(Is64Bit) {} + : MCAsmBackend(support::little), Is64Bit(Is64Bit) {} ~WebAssemblyAsmBackend() override {} unsigned getNumFixupKinds() const override { @@ -81,10 +46,11 @@ public: void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef<char> Data, - uint64_t Value, bool IsPCRel) const override; + uint64_t Value, bool IsPCRel, + const MCSubtargetInfo *STI) const override; - std::unique_ptr<MCObjectWriter> - createObjectWriter(raw_pwrite_stream &OS) const override; + std::unique_ptr<MCObjectTargetWriter> + createObjectTargetWriter() const override; // No instruction requires relaxation bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, @@ -93,51 +59,17 @@ public: return false; } - bool mayNeedRelaxation(const MCInst &Inst) const override { return false; } + bool mayNeedRelaxation(const MCInst &Inst, + const MCSubtargetInfo &STI) const override { + return false; + } void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, MCInst &Res) const override {} - bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override; + bool writeNopData(raw_ostream &OS, uint64_t Count) const override; }; -bool WebAssemblyAsmBackendELF::writeNopData(uint64_t Count, - MCObjectWriter *OW) const { - for (uint64_t i = 0; i < Count; ++i) - OW->write8(WebAssembly::Nop); - - return true; -} - -void WebAssemblyAsmBackendELF::applyFixup(const MCAssembler &Asm, - const MCFixup &Fixup, - const MCValue &Target, - MutableArrayRef<char> Data, - uint64_t Value, bool IsPCRel) const { - const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind()); - assert(Info.Flags == 0 && "WebAssembly does not use MCFixupKindInfo flags"); - - unsigned NumBytes = alignTo(Info.TargetSize, 8) / 8; - if 
(Value == 0) - return; // Doesn't change encoding. - - // Shift the value into position. - Value <<= Info.TargetOffset; - - unsigned Offset = Fixup.getOffset(); - assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!"); - - // For each byte of the fragment that the fixup touches, mask in the - // bits from the fixup value. - for (unsigned i = 0; i != NumBytes; ++i) - Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); -} - -std::unique_ptr<MCObjectWriter> -WebAssemblyAsmBackendELF::createObjectWriter(raw_pwrite_stream &OS) const { - return createWebAssemblyELFObjectWriter(OS, Is64Bit, 0); -} - const MCFixupKindInfo & WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { const static MCFixupKindInfo Infos[WebAssembly::NumTargetFixupKinds] = { @@ -158,13 +90,10 @@ WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { return Infos[Kind - FirstTargetFixupKind]; } -bool WebAssemblyAsmBackend::writeNopData(uint64_t Count, - MCObjectWriter *OW) const { - if (Count == 0) - return true; - +bool WebAssemblyAsmBackend::writeNopData(raw_ostream &OS, + uint64_t Count) const { for (uint64_t i = 0; i < Count; ++i) - OW->write8(WebAssembly::Nop); + OS << char(WebAssembly::Nop); return true; } @@ -173,7 +102,8 @@ void WebAssemblyAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef<char> Data, - uint64_t Value, bool IsPCRel) const { + uint64_t Value, bool IsPCRel, + const MCSubtargetInfo *STI) const { const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind()); assert(Info.Flags == 0 && "WebAssembly does not use MCFixupKindInfo flags"); @@ -193,14 +123,13 @@ void WebAssemblyAsmBackend::applyFixup(const MCAssembler &Asm, Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); } -std::unique_ptr<MCObjectWriter> -WebAssemblyAsmBackend::createObjectWriter(raw_pwrite_stream &OS) const { - return createWebAssemblyWasmObjectWriter(OS, Is64Bit); +std::unique_ptr<MCObjectTargetWriter> +WebAssemblyAsmBackend::createObjectTargetWriter() const { + return createWebAssemblyWasmObjectWriter(Is64Bit); } + } // end anonymous namespace MCAsmBackend *llvm::createWebAssemblyAsmBackend(const Triple &TT) { - if (TT.isOSBinFormatELF()) - return new WebAssemblyAsmBackendELF(TT.isArch64Bit()); return new WebAssemblyAsmBackend(TT.isArch64Bit()); } diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp deleted file mode 100644 index b67ecfa455b3..000000000000 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp +++ /dev/null @@ -1,68 +0,0 @@ -//===-- WebAssemblyELFObjectWriter.cpp - WebAssembly ELF Writer -----------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// \brief This file handles ELF-specific object emission, converting LLVM's -/// internal fixups into the appropriate relocations. 
-/// -//===----------------------------------------------------------------------===// - -#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" -#include "llvm/MC/MCELFObjectWriter.h" -#include "llvm/MC/MCFixup.h" -#include "llvm/MC/MCObjectWriter.h" -#include "llvm/Support/ErrorHandling.h" -using namespace llvm; - -namespace { -class WebAssemblyELFObjectWriter final : public MCELFObjectTargetWriter { -public: - WebAssemblyELFObjectWriter(bool Is64Bit, uint8_t OSABI); - -protected: - unsigned getRelocType(MCContext &Ctx, const MCValue &Target, - const MCFixup &Fixup, bool IsPCRel) const override; -}; -} // end anonymous namespace - -WebAssemblyELFObjectWriter::WebAssemblyELFObjectWriter(bool Is64Bit, - uint8_t OSABI) - : MCELFObjectTargetWriter(Is64Bit, OSABI, ELF::EM_WEBASSEMBLY, - /*HasRelocationAddend=*/false) {} - -unsigned WebAssemblyELFObjectWriter::getRelocType(MCContext &Ctx, - const MCValue &Target, - const MCFixup &Fixup, - bool IsPCRel) const { - // WebAssembly functions are not allocated in the address space. To resolve a - // pointer to a function, we must use a special relocation type. - if (const MCSymbolRefExpr *SyExp = - dyn_cast<MCSymbolRefExpr>(Fixup.getValue())) - if (SyExp->getKind() == MCSymbolRefExpr::VK_WebAssembly_FUNCTION) - return ELF::R_WEBASSEMBLY_FUNCTION; - - switch (Fixup.getKind()) { - case FK_Data_4: - assert(!is64Bit() && "4-byte relocations only supported on wasm32"); - return ELF::R_WEBASSEMBLY_DATA; - case FK_Data_8: - assert(is64Bit() && "8-byte relocations only supported on wasm64"); - return ELF::R_WEBASSEMBLY_DATA; - default: - llvm_unreachable("unimplemented fixup kind"); - } -} - -std::unique_ptr<MCObjectWriter> -llvm::createWebAssemblyELFObjectWriter(raw_pwrite_stream &OS, - bool Is64Bit, - uint8_t OSABI) { - auto MOTW = llvm::make_unique<WebAssemblyELFObjectWriter>(Is64Bit, OSABI); - return createELFObjectWriter(std::move(MOTW), OS, /*IsLittleEndian=*/true); -} diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp index 5f8c78ed1683..44fcc129c39e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp @@ -8,50 +8,18 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declarations of the WebAssemblyMCAsmInfo +/// This file contains the declarations of the WebAssemblyMCAsmInfo /// properties. /// //===----------------------------------------------------------------------===// #include "WebAssemblyMCAsmInfo.h" #include "llvm/ADT/Triple.h" + using namespace llvm; #define DEBUG_TYPE "wasm-mc-asm-info" -WebAssemblyMCAsmInfoELF::~WebAssemblyMCAsmInfoELF() {} - -WebAssemblyMCAsmInfoELF::WebAssemblyMCAsmInfoELF(const Triple &T) { - CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4; - - // TODO: What should MaxInstLength be? - - UseDataRegionDirectives = true; - - // Use .skip instead of .zero because .zero is confusing when used with two - // arguments (it doesn't actually zero things out). 
- ZeroDirective = "\t.skip\t"; - - Data8bitsDirective = "\t.int8\t"; - Data16bitsDirective = "\t.int16\t"; - Data32bitsDirective = "\t.int32\t"; - Data64bitsDirective = "\t.int64\t"; - - AlignmentIsInBytes = false; - COMMDirectiveAlignmentIsInBytes = false; - LCOMMDirectiveAlignmentType = LCOMM::Log2Alignment; - - SupportsDebugInformation = true; - - // For now, WebAssembly does not support exceptions. - ExceptionsType = ExceptionHandling::None; - - // TODO: UseIntegratedAssembler? - - // WebAssembly's stack is never executable. - UsesNonexecutableStackSection = false; -} - WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() {} WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) { @@ -76,8 +44,5 @@ WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) { SupportsDebugInformation = true; - // For now, WebAssembly does not support exceptions. - ExceptionsType = ExceptionHandling::None; - // TODO: UseIntegratedAssembler? } diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h index d9547096190e..8627a6e40c6a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h @@ -8,26 +8,19 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declaration of the WebAssemblyMCAsmInfo class. +/// This file contains the declaration of the WebAssemblyMCAsmInfo class. /// //===----------------------------------------------------------------------===// #ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCASMINFO_H #define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCASMINFO_H -#include "llvm/MC/MCAsmInfoELF.h" #include "llvm/MC/MCAsmInfoWasm.h" namespace llvm { class Triple; -class WebAssemblyMCAsmInfoELF final : public MCAsmInfoELF { -public: - explicit WebAssemblyMCAsmInfoELF(const Triple &T); - ~WebAssemblyMCAsmInfoELF() override; -}; - class WebAssemblyMCAsmInfo final : public MCAsmInfoWasm { public: explicit WebAssemblyMCAsmInfo(const Triple &T); diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp index 77744e53d62f..94ca94e1e18c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblyMCCodeEmitter class. +/// This file implements the WebAssemblyMCCodeEmitter class. 
/// //===----------------------------------------------------------------------===// @@ -23,9 +23,11 @@ #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCSymbol.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/EndianStream.h" #include "llvm/Support/LEB128.h" #include "llvm/Support/raw_ostream.h" + using namespace llvm; #define DEBUG_TYPE "mccodeemitter" @@ -86,14 +88,18 @@ void WebAssemblyMCCodeEmitter::encodeInstruction( assert(Desc.TSFlags == 0 && "WebAssembly non-variable_ops don't use TSFlags"); const MCOperandInfo &Info = Desc.OpInfo[i]; + LLVM_DEBUG(dbgs() << "Encoding immediate: type=" + << int(Info.OperandType) << "\n"); if (Info.OperandType == WebAssembly::OPERAND_I32IMM) { encodeSLEB128(int32_t(MO.getImm()), OS); + } else if (Info.OperandType == WebAssembly::OPERAND_OFFSET32) { + encodeULEB128(uint32_t(MO.getImm()), OS); } else if (Info.OperandType == WebAssembly::OPERAND_I64IMM) { encodeSLEB128(int64_t(MO.getImm()), OS); } else if (Info.OperandType == WebAssembly::OPERAND_GLOBAL) { llvm_unreachable("wasm globals should only be accessed symbolicly"); } else if (Info.OperandType == WebAssembly::OPERAND_SIGNATURE) { - encodeSLEB128(int64_t(MO.getImm()), OS); + OS << uint8_t(MO.getImm()); } else { encodeULEB128(uint64_t(MO.getImm()), OS); } @@ -112,11 +118,11 @@ void WebAssemblyMCCodeEmitter::encodeInstruction( // TODO: MC converts all floating point immediate operands to double. // This is fine for numeric values, but may cause NaNs to change bits. float f = float(MO.getFPImm()); - support::endian::Writer<support::little>(OS).write<float>(f); + support::endian::write<float>(OS, f, support::little); } else { assert(Info.OperandType == WebAssembly::OPERAND_F64IMM); double d = MO.getFPImm(); - support::endian::Writer<support::little>(OS).write<double>(d); + support::endian::write<double>(OS, d, support::little); } } else if (MO.isExpr()) { const MCOperandInfo &Info = Desc.OpInfo[i]; diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp index e7c8809de70e..baf8a0c96c0a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file provides WebAssembly-specific target descriptions. +/// This file provides WebAssembly-specific target descriptions. 
/// //===----------------------------------------------------------------------===// @@ -36,8 +36,6 @@ using namespace llvm; static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo & /*MRI*/, const Triple &TT) { - if (TT.isOSBinFormatELF()) - return new WebAssemblyMCAsmInfoELF(TT); return new WebAssemblyMCAsmInfo(TT); } @@ -82,10 +80,6 @@ static MCSubtargetInfo *createMCSubtargetInfo(const Triple &TT, StringRef CPU, static MCTargetStreamer * createObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) { - const Triple &TT = STI.getTargetTriple(); - if (TT.isOSBinFormatELF()) - return new WebAssemblyTargetELFStreamer(S); - return new WebAssemblyTargetWasmStreamer(S); } @@ -135,6 +129,7 @@ wasm::ValType WebAssembly::toValType(const MVT &Ty) { case MVT::i64: return wasm::ValType::I64; case MVT::f32: return wasm::ValType::F32; case MVT::f64: return wasm::ValType::F64; + case MVT::ExceptRef: return wasm::ValType::EXCEPT_REF; default: llvm_unreachable("unexpected type"); } } diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h index 7dca89ab822d..c1c8d243e920 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file provides WebAssembly-specific target descriptions. +/// This file provides WebAssembly-specific target descriptions. /// //===----------------------------------------------------------------------===// @@ -26,7 +26,7 @@ class MCAsmBackend; class MCCodeEmitter; class MCContext; class MCInstrInfo; -class MCObjectWriter; +class MCObjectTargetWriter; class MCSubtargetInfo; class MVT; class Target; @@ -40,13 +40,8 @@ MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII); MCAsmBackend *createWebAssemblyAsmBackend(const Triple &TT); -std::unique_ptr<MCObjectWriter> -createWebAssemblyELFObjectWriter(raw_pwrite_stream &OS, - bool Is64Bit, uint8_t OSABI); - -std::unique_ptr<MCObjectWriter> -createWebAssemblyWasmObjectWriter(raw_pwrite_stream &OS, - bool Is64Bit); +std::unique_ptr<MCObjectTargetWriter> +createWebAssemblyWasmObjectWriter(bool Is64Bit); namespace WebAssembly { enum OperandType { @@ -111,38 +106,166 @@ namespace WebAssembly { inline unsigned GetDefaultP2Align(unsigned Opcode) { switch (Opcode) { case WebAssembly::LOAD8_S_I32: + case WebAssembly::LOAD8_S_I32_S: case WebAssembly::LOAD8_U_I32: + case WebAssembly::LOAD8_U_I32_S: case WebAssembly::LOAD8_S_I64: + case WebAssembly::LOAD8_S_I64_S: case WebAssembly::LOAD8_U_I64: + case WebAssembly::LOAD8_U_I64_S: case WebAssembly::ATOMIC_LOAD8_U_I32: + case WebAssembly::ATOMIC_LOAD8_U_I32_S: case WebAssembly::ATOMIC_LOAD8_U_I64: + case WebAssembly::ATOMIC_LOAD8_U_I64_S: case WebAssembly::STORE8_I32: + case WebAssembly::STORE8_I32_S: case WebAssembly::STORE8_I64: + case WebAssembly::STORE8_I64_S: + case WebAssembly::ATOMIC_STORE8_I32: + case WebAssembly::ATOMIC_STORE8_I32_S: + case WebAssembly::ATOMIC_STORE8_I64: + case WebAssembly::ATOMIC_STORE8_I64_S: + case WebAssembly::ATOMIC_RMW8_U_ADD_I32: + case WebAssembly::ATOMIC_RMW8_U_ADD_I32_S: + case WebAssembly::ATOMIC_RMW8_U_ADD_I64: + case WebAssembly::ATOMIC_RMW8_U_ADD_I64_S: + case WebAssembly::ATOMIC_RMW8_U_SUB_I32: + case WebAssembly::ATOMIC_RMW8_U_SUB_I32_S: + case WebAssembly::ATOMIC_RMW8_U_SUB_I64: + case 
WebAssembly::ATOMIC_RMW8_U_SUB_I64_S: + case WebAssembly::ATOMIC_RMW8_U_AND_I32: + case WebAssembly::ATOMIC_RMW8_U_AND_I32_S: + case WebAssembly::ATOMIC_RMW8_U_AND_I64: + case WebAssembly::ATOMIC_RMW8_U_AND_I64_S: + case WebAssembly::ATOMIC_RMW8_U_OR_I32: + case WebAssembly::ATOMIC_RMW8_U_OR_I32_S: + case WebAssembly::ATOMIC_RMW8_U_OR_I64: + case WebAssembly::ATOMIC_RMW8_U_OR_I64_S: + case WebAssembly::ATOMIC_RMW8_U_XOR_I32: + case WebAssembly::ATOMIC_RMW8_U_XOR_I32_S: + case WebAssembly::ATOMIC_RMW8_U_XOR_I64: + case WebAssembly::ATOMIC_RMW8_U_XOR_I64_S: + case WebAssembly::ATOMIC_RMW8_U_XCHG_I32: + case WebAssembly::ATOMIC_RMW8_U_XCHG_I32_S: + case WebAssembly::ATOMIC_RMW8_U_XCHG_I64: + case WebAssembly::ATOMIC_RMW8_U_XCHG_I64_S: return 0; case WebAssembly::LOAD16_S_I32: + case WebAssembly::LOAD16_S_I32_S: case WebAssembly::LOAD16_U_I32: + case WebAssembly::LOAD16_U_I32_S: case WebAssembly::LOAD16_S_I64: + case WebAssembly::LOAD16_S_I64_S: case WebAssembly::LOAD16_U_I64: + case WebAssembly::LOAD16_U_I64_S: case WebAssembly::ATOMIC_LOAD16_U_I32: + case WebAssembly::ATOMIC_LOAD16_U_I32_S: case WebAssembly::ATOMIC_LOAD16_U_I64: + case WebAssembly::ATOMIC_LOAD16_U_I64_S: case WebAssembly::STORE16_I32: + case WebAssembly::STORE16_I32_S: case WebAssembly::STORE16_I64: + case WebAssembly::STORE16_I64_S: + case WebAssembly::ATOMIC_STORE16_I32: + case WebAssembly::ATOMIC_STORE16_I32_S: + case WebAssembly::ATOMIC_STORE16_I64: + case WebAssembly::ATOMIC_STORE16_I64_S: + case WebAssembly::ATOMIC_RMW16_U_ADD_I32: + case WebAssembly::ATOMIC_RMW16_U_ADD_I32_S: + case WebAssembly::ATOMIC_RMW16_U_ADD_I64: + case WebAssembly::ATOMIC_RMW16_U_ADD_I64_S: + case WebAssembly::ATOMIC_RMW16_U_SUB_I32: + case WebAssembly::ATOMIC_RMW16_U_SUB_I32_S: + case WebAssembly::ATOMIC_RMW16_U_SUB_I64: + case WebAssembly::ATOMIC_RMW16_U_SUB_I64_S: + case WebAssembly::ATOMIC_RMW16_U_AND_I32: + case WebAssembly::ATOMIC_RMW16_U_AND_I32_S: + case WebAssembly::ATOMIC_RMW16_U_AND_I64: + case WebAssembly::ATOMIC_RMW16_U_AND_I64_S: + case WebAssembly::ATOMIC_RMW16_U_OR_I32: + case WebAssembly::ATOMIC_RMW16_U_OR_I32_S: + case WebAssembly::ATOMIC_RMW16_U_OR_I64: + case WebAssembly::ATOMIC_RMW16_U_OR_I64_S: + case WebAssembly::ATOMIC_RMW16_U_XOR_I32: + case WebAssembly::ATOMIC_RMW16_U_XOR_I32_S: + case WebAssembly::ATOMIC_RMW16_U_XOR_I64: + case WebAssembly::ATOMIC_RMW16_U_XOR_I64_S: + case WebAssembly::ATOMIC_RMW16_U_XCHG_I32: + case WebAssembly::ATOMIC_RMW16_U_XCHG_I32_S: + case WebAssembly::ATOMIC_RMW16_U_XCHG_I64: + case WebAssembly::ATOMIC_RMW16_U_XCHG_I64_S: return 1; case WebAssembly::LOAD_I32: + case WebAssembly::LOAD_I32_S: case WebAssembly::LOAD_F32: + case WebAssembly::LOAD_F32_S: case WebAssembly::STORE_I32: + case WebAssembly::STORE_I32_S: case WebAssembly::STORE_F32: + case WebAssembly::STORE_F32_S: case WebAssembly::LOAD32_S_I64: + case WebAssembly::LOAD32_S_I64_S: case WebAssembly::LOAD32_U_I64: + case WebAssembly::LOAD32_U_I64_S: case WebAssembly::STORE32_I64: + case WebAssembly::STORE32_I64_S: case WebAssembly::ATOMIC_LOAD_I32: + case WebAssembly::ATOMIC_LOAD_I32_S: case WebAssembly::ATOMIC_LOAD32_U_I64: + case WebAssembly::ATOMIC_LOAD32_U_I64_S: + case WebAssembly::ATOMIC_STORE_I32: + case WebAssembly::ATOMIC_STORE_I32_S: + case WebAssembly::ATOMIC_STORE32_I64: + case WebAssembly::ATOMIC_STORE32_I64_S: + case WebAssembly::ATOMIC_RMW_ADD_I32: + case WebAssembly::ATOMIC_RMW_ADD_I32_S: + case WebAssembly::ATOMIC_RMW32_U_ADD_I64: + case WebAssembly::ATOMIC_RMW32_U_ADD_I64_S: + case WebAssembly::ATOMIC_RMW_SUB_I32: + case 
WebAssembly::ATOMIC_RMW_SUB_I32_S: + case WebAssembly::ATOMIC_RMW32_U_SUB_I64: + case WebAssembly::ATOMIC_RMW32_U_SUB_I64_S: + case WebAssembly::ATOMIC_RMW_AND_I32: + case WebAssembly::ATOMIC_RMW_AND_I32_S: + case WebAssembly::ATOMIC_RMW32_U_AND_I64: + case WebAssembly::ATOMIC_RMW32_U_AND_I64_S: + case WebAssembly::ATOMIC_RMW_OR_I32: + case WebAssembly::ATOMIC_RMW_OR_I32_S: + case WebAssembly::ATOMIC_RMW32_U_OR_I64: + case WebAssembly::ATOMIC_RMW32_U_OR_I64_S: + case WebAssembly::ATOMIC_RMW_XOR_I32: + case WebAssembly::ATOMIC_RMW_XOR_I32_S: + case WebAssembly::ATOMIC_RMW32_U_XOR_I64: + case WebAssembly::ATOMIC_RMW32_U_XOR_I64_S: + case WebAssembly::ATOMIC_RMW_XCHG_I32: + case WebAssembly::ATOMIC_RMW_XCHG_I32_S: + case WebAssembly::ATOMIC_RMW32_U_XCHG_I64: + case WebAssembly::ATOMIC_RMW32_U_XCHG_I64_S: return 2; case WebAssembly::LOAD_I64: + case WebAssembly::LOAD_I64_S: case WebAssembly::LOAD_F64: + case WebAssembly::LOAD_F64_S: case WebAssembly::STORE_I64: + case WebAssembly::STORE_I64_S: case WebAssembly::STORE_F64: + case WebAssembly::STORE_F64_S: case WebAssembly::ATOMIC_LOAD_I64: + case WebAssembly::ATOMIC_LOAD_I64_S: + case WebAssembly::ATOMIC_STORE_I64: + case WebAssembly::ATOMIC_STORE_I64_S: + case WebAssembly::ATOMIC_RMW_ADD_I64: + case WebAssembly::ATOMIC_RMW_ADD_I64_S: + case WebAssembly::ATOMIC_RMW_SUB_I64: + case WebAssembly::ATOMIC_RMW_SUB_I64_S: + case WebAssembly::ATOMIC_RMW_AND_I64: + case WebAssembly::ATOMIC_RMW_AND_I64_S: + case WebAssembly::ATOMIC_RMW_OR_I64: + case WebAssembly::ATOMIC_RMW_OR_I64_S: + case WebAssembly::ATOMIC_RMW_XOR_I64: + case WebAssembly::ATOMIC_RMW_XOR_I64_S: + case WebAssembly::ATOMIC_RMW_XCHG_I64: + case WebAssembly::ATOMIC_RMW_XCHG_I64_S: return 3; default: llvm_unreachable("Only loads and stores have p2align values"); @@ -158,19 +281,20 @@ static const unsigned LoadP2AlignOperandNo = 1; static const unsigned StoreP2AlignOperandNo = 0; /// This is used to indicate block signatures. -enum class ExprType { - Void = -0x40, - I32 = -0x01, - I64 = -0x02, - F32 = -0x03, - F64 = -0x04, - I8x16 = -0x05, - I16x8 = -0x06, - I32x4 = -0x07, - F32x4 = -0x08, - B8x16 = -0x09, - B16x8 = -0x0a, - B32x4 = -0x0b +enum class ExprType : unsigned { + Void = 0x40, + I32 = 0x7F, + I64 = 0x7E, + F32 = 0x7D, + F64 = 0x7C, + I8x16 = 0x7B, + I16x8 = 0x7A, + I32x4 = 0x79, + F32x4 = 0x78, + B8x16 = 0x77, + B16x8 = 0x76, + B32x4 = 0x75, + ExceptRef = 0x68 }; /// Instruction opcodes emitted via means other than CodeGen. diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp index 0ca52ad651b5..5272e188e1d0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines WebAssembly-specific target streamer classes. +/// This file defines WebAssembly-specific target streamer classes. /// These are for implementing support for target-specific assembly directives. 
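Note: the GetDefaultP2Align switch above reduces to a simple rule: the default alignment exponent is log2 of the access width in bytes (0 for 8-bit, 1 for 16-bit, 2 for 32-bit, 3 for 64-bit accesses). A compact sketch of that rule (illustrative, not the LLVM helper):

#include <cassert>

// Default p2align of a wasm load/store: 1-byte access -> 0, 2 -> 1,
// 4 -> 2, 8 -> 3.
unsigned defaultP2Align(unsigned AccessBytes) {
  assert(AccessBytes != 0 && (AccessBytes & (AccessBytes - 1)) == 0 &&
         "access size must be a power of two");
  unsigned P2 = 0;
  while ((1u << P2) < AccessBytes)
    ++P2;
  return P2;
}
// defaultP2Align(1) == 0, defaultP2Align(4) == 2, defaultP2Align(8) == 3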
/// //===----------------------------------------------------------------------===// @@ -17,10 +17,8 @@ #include "InstPrinter/WebAssemblyInstPrinter.h" #include "WebAssemblyMCTargetDesc.h" #include "llvm/MC/MCContext.h" -#include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCSubtargetInfo.h" -#include "llvm/MC/MCSymbolELF.h" #include "llvm/MC/MCSymbolWasm.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -31,16 +29,13 @@ WebAssemblyTargetStreamer::WebAssemblyTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {} void WebAssemblyTargetStreamer::emitValueType(wasm::ValType Type) { - Streamer.EmitSLEB128IntValue(int32_t(Type)); + Streamer.EmitIntValue(uint8_t(Type), 1); } WebAssemblyTargetAsmStreamer::WebAssemblyTargetAsmStreamer( MCStreamer &S, formatted_raw_ostream &OS) : WebAssemblyTargetStreamer(S), OS(OS) {} -WebAssemblyTargetELFStreamer::WebAssemblyTargetELFStreamer(MCStreamer &S) - : WebAssemblyTargetStreamer(S) {} - WebAssemblyTargetWasmStreamer::WebAssemblyTargetWasmStreamer(MCStreamer &S) : WebAssemblyTargetStreamer(S) {} @@ -87,27 +82,6 @@ void WebAssemblyTargetAsmStreamer::emitLocal(ArrayRef<MVT> Types) { } } -void WebAssemblyTargetAsmStreamer::emitGlobal( - ArrayRef<wasm::Global> Globals) { - if (!Globals.empty()) { - OS << "\t.globalvar \t"; - - bool First = true; - for (const wasm::Global &G : Globals) { - if (First) - First = false; - else - OS << ", "; - OS << WebAssembly::TypeToString(G.Type); - if (!G.InitialModule.empty()) - OS << '=' << G.InitialModule << ':' << G.InitialName; - else - OS << '=' << G.InitialValue; - } - OS << '\n'; - } -} - void WebAssemblyTargetAsmStreamer::emitEndFunc() { OS << "\t.endfunc\n"; } void WebAssemblyTargetAsmStreamer::emitIndirectFunctionType( @@ -128,46 +102,13 @@ void WebAssemblyTargetAsmStreamer::emitGlobalImport(StringRef name) { OS << "\t.import_global\t" << name << '\n'; } -void WebAssemblyTargetAsmStreamer::emitIndIdx(const MCExpr *Value) { - OS << "\t.indidx \t" << *Value << '\n'; -} - -void WebAssemblyTargetELFStreamer::emitParam(MCSymbol *Symbol, - ArrayRef<MVT> Types) { - // Nothing to emit; params are declared as part of the function signature. -} - -void WebAssemblyTargetELFStreamer::emitResult(MCSymbol *Symbol, - ArrayRef<MVT> Types) { - // Nothing to emit; results are declared as part of the function signature. -} - -void WebAssemblyTargetELFStreamer::emitLocal(ArrayRef<MVT> Types) { - Streamer.EmitULEB128IntValue(Types.size()); - for (MVT Type : Types) - emitValueType(WebAssembly::toValType(Type)); -} - -void WebAssemblyTargetELFStreamer::emitGlobal( - ArrayRef<wasm::Global> Globals) { - llvm_unreachable(".globalvar encoding not yet implemented"); -} - -void WebAssemblyTargetELFStreamer::emitEndFunc() { - Streamer.EmitIntValue(WebAssembly::End, 1); -} - -void WebAssemblyTargetELFStreamer::emitIndIdx(const MCExpr *Value) { - llvm_unreachable(".indidx encoding not yet implemented"); -} - -void WebAssemblyTargetELFStreamer::emitIndirectFunctionType( - MCSymbol *Symbol, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) { - // Nothing to emit here. TODO: Re-design how linking works and re-evaluate - // whether it's necessary for .o files to declare indirect function types. 
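Note: the ExprType/emitValueType changes above switch from writing block and value types as SLEB128 of a negative code (e.g. -0x01 for i32) to writing the raw single byte (0x7F for i32). For these codes the two encodings are byte-identical, since SLEB128 of any value in [-64, -1] is one byte holding its low 7 bits. A quick standalone check (illustrative only):

#include <cassert>
#include <cstdint>

// SLEB128 of a value in [-64, 63] fits in one byte: its low 7 bits,
// continuation bit clear.
uint8_t sleb128SingleByte(int8_t V) {
  assert(V >= -64 && V <= 63);
  return uint8_t(V) & 0x7f;
}

int main() {
  assert(sleb128SingleByte(-0x01) == 0x7F);  // i32
  assert(sleb128SingleByte(-0x02) == 0x7E);  // i64
  assert(sleb128SingleByte(-0x03) == 0x7D);  // f32
  assert(sleb128SingleByte(-0x04) == 0x7C);  // f64
  assert(sleb128SingleByte(-0x40) == 0x40);  // void block type
  return 0;
}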
+void WebAssemblyTargetAsmStreamer::emitImportModule(MCSymbolWasm *Sym, + StringRef ModuleName) { + OS << "\t.import_module\t" << Sym->getName() << ", " << ModuleName << '\n'; } -void WebAssemblyTargetELFStreamer::emitGlobalImport(StringRef name) { +void WebAssemblyTargetAsmStreamer::emitIndIdx(const MCExpr *Value) { + OS << "\t.indidx \t" << *Value << '\n'; } void WebAssemblyTargetWasmStreamer::emitParam(MCSymbol *Symbol, @@ -204,31 +145,6 @@ void WebAssemblyTargetWasmStreamer::emitLocal(ArrayRef<MVT> Types) { } } -void WebAssemblyTargetWasmStreamer::emitGlobal( - ArrayRef<wasm::Global> Globals) { - // Encode the globals use by the funciton into the special .global_variables - // section. This will later be decoded and turned into contents for the - // Globals Section. - Streamer.PushSection(); - Streamer.SwitchSection(Streamer.getContext().getWasmSection( - ".global_variables", SectionKind::getMetadata())); - for (const wasm::Global &G : Globals) { - Streamer.EmitIntValue(int32_t(G.Type), 1); - Streamer.EmitIntValue(G.Mutable, 1); - if (G.InitialModule.empty()) { - Streamer.EmitIntValue(0, 1); // indicate that we have an int value - Streamer.EmitSLEB128IntValue(0); - } else { - Streamer.EmitIntValue(1, 1); // indicate that we have a module import - Streamer.EmitBytes(G.InitialModule); - Streamer.EmitIntValue(0, 1); // nul-terminate - Streamer.EmitBytes(G.InitialName); - Streamer.EmitIntValue(0, 1); // nul-terminate - } - } - Streamer.PopSection(); -} - void WebAssemblyTargetWasmStreamer::emitEndFunc() { llvm_unreachable(".end_func is not needed for direct wasm output"); } @@ -256,9 +172,14 @@ void WebAssemblyTargetWasmStreamer::emitIndirectFunctionType( WasmSym->setParams(std::move(ValParams)); WasmSym->setReturns(std::move(ValResults)); - WasmSym->setIsFunction(true); + WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); } void WebAssemblyTargetWasmStreamer::emitGlobalImport(StringRef name) { llvm_unreachable(".global_import is not needed for direct wasm output"); } + +void WebAssemblyTargetWasmStreamer::emitImportModule(MCSymbolWasm *Sym, + StringRef ModuleName) { + Sym->setModuleName(ModuleName); +} diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h index 2cb21a20580b..cafcb04ccd11 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares WebAssembly-specific target streamer classes. +/// This file declares WebAssembly-specific target streamer classes. /// These are for implementing support for target-specific assembly directives. /// //===----------------------------------------------------------------------===// @@ -17,13 +17,13 @@ #define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H #include "llvm/BinaryFormat/Wasm.h" -#include "llvm/CodeGen/MachineValueType.h" #include "llvm/MC/MCStreamer.h" +#include "llvm/Support/MachineValueType.h" namespace llvm { -class MCELFStreamer; class MCWasmStreamer; +class MCSymbolWasm; /// WebAssembly-specific streamer interface, to implement support /// WebAssembly-specific assembly directives. 
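The new emitImportModule hook above is driven from the IR level: WebAssemblyAsmPrinter (see its hunk further below) forwards a function's "wasm-import-module" attribute to the streamer. A minimal, self-contained sketch of the producing side; the module name "env" and function name "host_log" are illustrative, not taken from the patch:

```cpp
// Sketch only: how a front end would request an explicit import module for
// an external declaration.
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  M.setTargetTriple("wasm32-unknown-unknown-wasm");

  // An external declaration that should be imported from module "env".
  auto *FT = FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt32Ty(Ctx)},
                               /*isVarArg=*/false);
  Function *F =
      Function::Create(FT, Function::ExternalLinkage, "host_log", &M);
  F->addFnAttr("wasm-import-module", "env");

  // When the AsmPrinter sees this attribute it calls
  // emitImportModule(WasmSym, "env"), which prints
  //   .import_module host_log, env
  // for assembly output, or records the module name on the Wasm symbol for
  // direct object output.
  return 0;
}
```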
@@ -37,8 +37,6 @@ public: virtual void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) = 0; /// .local virtual void emitLocal(ArrayRef<MVT> Types) = 0; - /// .globalvar - virtual void emitGlobal(ArrayRef<wasm::Global> Globals) = 0; /// .endfunc virtual void emitEndFunc() = 0; /// .functype @@ -49,6 +47,8 @@ public: virtual void emitIndIdx(const MCExpr *Value) = 0; /// .import_global virtual void emitGlobalImport(StringRef name) = 0; + /// .import_module + virtual void emitImportModule(MCSymbolWasm *Sym, StringRef ModuleName) = 0; protected: void emitValueType(wasm::ValType Type); @@ -64,30 +64,13 @@ public: void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) override; void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) override; void emitLocal(ArrayRef<MVT> Types) override; - void emitGlobal(ArrayRef<wasm::Global> Globals) override; - void emitEndFunc() override; - void emitIndirectFunctionType(MCSymbol *Symbol, - SmallVectorImpl<MVT> &Params, - SmallVectorImpl<MVT> &Results) override; - void emitIndIdx(const MCExpr *Value) override; - void emitGlobalImport(StringRef name) override; -}; - -/// This part is for ELF object output -class WebAssemblyTargetELFStreamer final : public WebAssemblyTargetStreamer { -public: - explicit WebAssemblyTargetELFStreamer(MCStreamer &S); - - void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) override; - void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) override; - void emitLocal(ArrayRef<MVT> Types) override; - void emitGlobal(ArrayRef<wasm::Global> Globals) override; void emitEndFunc() override; void emitIndirectFunctionType(MCSymbol *Symbol, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) override; void emitIndIdx(const MCExpr *Value) override; void emitGlobalImport(StringRef name) override; + void emitImportModule(MCSymbolWasm *Sym, StringRef ModuleName) override; }; /// This part is for Wasm object output @@ -98,13 +81,13 @@ public: void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) override; void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) override; void emitLocal(ArrayRef<MVT> Types) override; - void emitGlobal(ArrayRef<wasm::Global> Globals) override; void emitEndFunc() override; void emitIndirectFunctionType(MCSymbol *Symbol, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) override; void emitIndIdx(const MCExpr *Value) override; void emitGlobalImport(StringRef name) override; + void emitImportModule(MCSymbolWasm *Sym, StringRef ModuleName) override; }; } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp index 39abde26df7f..4fb12d40b01b 100644 --- a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file handles Wasm-specific object emission, converting LLVM's +/// This file handles Wasm-specific object emission, converting LLVM's /// internal fixups into the appropriate relocations. 
/// //===----------------------------------------------------------------------===// @@ -20,9 +20,10 @@ #include "llvm/MC/MCFixup.h" #include "llvm/MC/MCFixupKindInfo.h" #include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCSymbolWasm.h" -#include "llvm/MC/MCWasmObjectWriter.h" #include "llvm/MC/MCValue.h" +#include "llvm/MC/MCWasmObjectWriter.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -61,6 +62,25 @@ static bool IsFunctionType(const MCValue &Target) { return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX; } +static const MCSection *GetFixupSection(const MCExpr *Expr) { + if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr)) { + if (SyExp->getSymbol().isInSection()) + return &SyExp->getSymbol().getSection(); + return nullptr; + } + + if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr)) { + auto SectionLHS = GetFixupSection(BinOp->getLHS()); + auto SectionRHS = GetFixupSection(BinOp->getRHS()); + return SectionLHS == SectionRHS ? nullptr : SectionLHS; + } + + if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr)) + return GetFixupSection(UnOp->getSubExpr()); + + return nullptr; +} + unsigned WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target, const MCFixup &Fixup) const { @@ -86,6 +106,13 @@ WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target, case FK_Data_4: if (IsFunction) return wasm::R_WEBASSEMBLY_TABLE_INDEX_I32; + if (auto Section = static_cast<const MCSectionWasm *>( + GetFixupSection(Fixup.getValue()))) { + if (Section->getKind().isText()) + return wasm::R_WEBASSEMBLY_FUNCTION_OFFSET_I32; + else if (!Section->isWasmData()) + return wasm::R_WEBASSEMBLY_SECTION_OFFSET_I32; + } return wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32; case FK_Data_8: llvm_unreachable("FK_Data_8 not implemented yet"); @@ -94,9 +121,7 @@ WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target, } } -std::unique_ptr<MCObjectWriter> -llvm::createWebAssemblyWasmObjectWriter(raw_pwrite_stream &OS, - bool Is64Bit) { - auto MOTW = llvm::make_unique<WebAssemblyWasmObjectWriter>(Is64Bit); - return createWasmObjectWriter(std::move(MOTW), OS); +std::unique_ptr<MCObjectTargetWriter> +llvm::createWebAssemblyWasmObjectWriter(bool Is64Bit) { + return llvm::make_unique<WebAssemblyWasmObjectWriter>(Is64Bit); } diff --git a/contrib/llvm/lib/Target/WebAssembly/README.txt b/contrib/llvm/lib/Target/WebAssembly/README.txt index 3433b1553e8c..ef0099f07efb 100644 --- a/contrib/llvm/lib/Target/WebAssembly/README.txt +++ b/contrib/llvm/lib/Target/WebAssembly/README.txt @@ -2,15 +2,42 @@ This WebAssembly backend is presently under development. -Currently the easiest way to use it is through Emscripten, which provides a -compilation environment that includes standard libraries, tools, and packaging -for producing WebAssembly applications that can run in browsers and other -environments. For more information, see the Emscripten documentation in -general, and this page in particular: +The most notable feature which is not yet stable is the ".o" file format. +".o" file support is needed for many common ways of using LLVM, such as +using it through "clang -c", so this backend is not yet considered widely +usable. However, this backend is usable within some language toolchain +packages: + +Emscripten provides a C/C++ compilation environment that includes standard +libraries, tools, and packaging for producing WebAssembly applications that +can run in browsers and other environments. 
For more information, see the +Emscripten documentation in general, and this page in particular: + * https://github.com/kripken/emscripten/wiki/New-WebAssembly-Backend + +Rust provides WebAssembly support integrated into Cargo. There are two +main options: + - wasm32-unknown-unknown, which provides a relatively minimal environment + that has an emphasis on being "native" + - wasm32-unknown-emscripten, which uses Emscripten internally and + provides standard C/C++ libraries, filesystem emulation, GL and SDL + bindings +For more information, see: + * https://www.hellorust.com/ + + +This backend does not yet support debug info. Full DWARF support needs a +design for how DWARF should be represented in WebAssembly. Sourcemap support +has an existing design and some corresponding browser implementations, so it +just needs implementing in LLVM. -Other ways of using this backend, such as via a standalone "clang", are also -under development, though they are not generally usable yet. +Work-in-progress documentation for the ".o" file format is here: + + * https://github.com/WebAssembly/tool-conventions/blob/master/Linking.md + +A corresponding linker implementation is also under development: + + * https://lld.llvm.org/WebAssembly.html For more information on WebAssembly itself, see the home page: * https://webassembly.github.io/ @@ -30,6 +57,8 @@ turn red if not. Once most of these pass, further testing will use LLVM's own test suite. The tests can be run locally using: https://github.com/WebAssembly/waterfall/blob/master/src/compile_torture_tests.py +Some notes on ways that the generated code could be improved follow: + //===---------------------------------------------------------------------===// Br, br_if, and br_table instructions can support having a value on the value @@ -127,7 +156,7 @@ However, if moving the binary operator to its user moves it to a place where its operands can't be moved to, it would be better to leave it in place, or perhaps move it up, so that it can stackify its operands. A binary operator has two operands and one result, so in such cases there could be a net win by -prefering the operands. +preferring the operands. //===---------------------------------------------------------------------===// @@ -138,11 +167,10 @@ instructions advantageously for this purpose. //===---------------------------------------------------------------------===// -WebAssembly is now officially a stack machine, rather than an AST, and this -comes with additional opportunities for WebAssemblyRegStackify. Specifically, -the stack doesn't need to be empty after an instruction with no return values. -WebAssemblyRegStackify could be extended, or possibly rewritten, to take -advantage of the new opportunities. +WebAssemblyRegStackify currently assumes that the stack must be empty after +an instruction with no return values, however wasm doesn't actually require +this. WebAssemblyRegStackify could be extended, or possibly rewritten, to take +full advantage of what WebAssembly permits. 
//===---------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp index a2c03b1a0400..f7a417c0ed49 100644 --- a/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file registers the WebAssembly target. +/// This file registers the WebAssembly target. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h index 7ac6c3991531..05b7b21fb597 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the entry points for global functions defined in +/// This file contains the entry points for global functions defined in /// the LLVM WebAssembly back-end. /// //===----------------------------------------------------------------------===// @@ -27,8 +27,8 @@ class FunctionPass; // LLVM IR passes. ModulePass *createWebAssemblyLowerEmscriptenEHSjLj(bool DoEH, bool DoSjLj); -void initializeWebAssemblyLowerEmscriptenEHSjLjPass(PassRegistry &); ModulePass *createWebAssemblyLowerGlobalDtors(); +ModulePass *createWebAssemblyAddMissingPrototypes(); ModulePass *createWebAssemblyFixFunctionBitcasts(); FunctionPass *createWebAssemblyOptimizeReturned(); @@ -47,6 +47,7 @@ FunctionPass *createWebAssemblyRegStackify(); FunctionPass *createWebAssemblyRegColoring(); FunctionPass *createWebAssemblyExplicitLocals(); FunctionPass *createWebAssemblyFixIrreducibleControlFlow(); +FunctionPass *createWebAssemblyLateEHPrepare(); FunctionPass *createWebAssemblyCFGSort(); FunctionPass *createWebAssemblyCFGStackify(); FunctionPass *createWebAssemblyLowerBrUnless(); @@ -54,6 +55,31 @@ FunctionPass *createWebAssemblyRegNumbering(); FunctionPass *createWebAssemblyPeephole(); FunctionPass *createWebAssemblyCallIndirectFixup(); +// PassRegistry initialization declarations. 
+void initializeWebAssemblyAddMissingPrototypesPass(PassRegistry &); +void initializeWebAssemblyLowerEmscriptenEHSjLjPass(PassRegistry &); +void initializeLowerGlobalDtorsPass(PassRegistry &); +void initializeFixFunctionBitcastsPass(PassRegistry &); +void initializeOptimizeReturnedPass(PassRegistry &); +void initializeWebAssemblyArgumentMovePass(PassRegistry &); +void initializeWebAssemblySetP2AlignOperandsPass(PassRegistry &); +void initializeWebAssemblyReplacePhysRegsPass(PassRegistry &); +void initializeWebAssemblyPrepareForLiveIntervalsPass(PassRegistry &); +void initializeWebAssemblyOptimizeLiveIntervalsPass(PassRegistry &); +void initializeWebAssemblyStoreResultsPass(PassRegistry &); +void initializeWebAssemblyRegStackifyPass(PassRegistry &); +void initializeWebAssemblyRegColoringPass(PassRegistry &); +void initializeWebAssemblyExplicitLocalsPass(PassRegistry &); +void initializeWebAssemblyFixIrreducibleControlFlowPass(PassRegistry &); +void initializeWebAssemblyLateEHPreparePass(PassRegistry &); +void initializeWebAssemblyExceptionInfoPass(PassRegistry &); +void initializeWebAssemblyCFGSortPass(PassRegistry &); +void initializeWebAssemblyCFGStackifyPass(PassRegistry &); +void initializeWebAssemblyLowerBrUnlessPass(PassRegistry &); +void initializeWebAssemblyRegNumberingPass(PassRegistry &); +void initializeWebAssemblyPeepholePass(PassRegistry &); +void initializeWebAssemblyCallIndirectFixupPass(PassRegistry &); + } // end namespace llvm #endif diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td index 99cf1f119a20..2f301da8e422 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This is a target description file for the WebAssembly architecture, +/// This is a target description file for the WebAssembly architecture, /// which is also known as "wasm". /// //===----------------------------------------------------------------------===// @@ -32,6 +32,15 @@ def FeatureNontrappingFPToInt : "HasNontrappingFPToInt", "true", "Enable non-trapping float-to-int conversion operators">; +def FeatureSignExt : + SubtargetFeature<"sign-ext", + "HasSignExt", "true", + "Enable sign extension operators">; + +def FeatureExceptionHandling : + SubtargetFeature<"exception-handling", "HasExceptionHandling", "true", + "Enable Wasm exception handling">; + //===----------------------------------------------------------------------===// // Architectures. 
//===----------------------------------------------------------------------===// @@ -68,6 +77,20 @@ def : ProcessorModel<"bleeding-edge", NoSchedModel, // Target Declaration //===----------------------------------------------------------------------===// +def WebAssemblyAsmParser : AsmParser { + // The physical register names are not in the binary format or asm text + let ShouldEmitMatchRegisterName = 0; +} + +def WebAssemblyAsmWriter : AsmWriter { + string AsmWriterClassName = "InstPrinter"; + int PassSubtarget = 0; + int Variant = 0; + bit isMCAsmWriter = 1; +} + def WebAssembly : Target { let InstructionSet = WebAssemblyInstrInfo; + let AssemblyParsers = [WebAssemblyAsmParser]; + let AssemblyWriters = [WebAssemblyAsmWriter]; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp new file mode 100644 index 000000000000..4af9cd150bf7 --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAddMissingPrototypes.cpp @@ -0,0 +1,144 @@ +//===-- WebAssemblyAddMissingPrototypes.cpp - Fix prototypeless functions -===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// Add prototypes to prototype-less functions. +/// +/// WebAssembly has strict function prototype checking, so we need function +/// declarations to match the call sites. Clang treats prototype-less functions +/// as varargs (foo(...)), which happens to work on existing platforms but +/// doesn't under WebAssembly. This pass will find all the call sites of each +/// prototype-less function, ensure they agree, and then set the signature +/// on the function declaration accordingly.
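To make the description above concrete, here is a minimal, self-contained sketch, not part of the patch, of the IR shape this pass matches: a varargs, zero-parameter declaration tagged "no-prototype", called through a bitcast that carries the real signature. The function names "callee" and "caller" are invented for illustration.

```cpp
// Sketch of the input this pass rewrites (assumed front-end behaviour,
// simplified).
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);

  // C input, conceptually:
  //   void callee();                    // no prototype
  //   void caller(void) { callee(42); }
  // Clang emits the declaration as a zero-parameter varargs function tagged
  // with the "no-prototype" attribute...
  auto *ProtoLess = FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/true);
  Function *Callee =
      Function::Create(ProtoLess, Function::ExternalLinkage, "callee", &M);
  Callee->addFnAttr("no-prototype");

  // ...and each call site goes through a bitcast to the type implied by the
  // arguments actually passed, here void(i32).
  auto *CalledTy =
      FunctionType::get(Type::getVoidTy(Ctx), {Type::getInt32Ty(Ctx)}, false);
  Function *Caller =
      Function::Create(FunctionType::get(Type::getVoidTy(Ctx), false),
                       Function::ExternalLinkage, "caller", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", Caller));
  Value *Cast = B.CreateBitCast(Callee, CalledTy->getPointerTo());
  B.CreateCall(Cast, {B.getInt32(42)});
  B.CreateRetVoid();
  return 0;
}
```

Given that input, the pass below derives void(i32) from the bitcast, recreates the declaration with that signature, and drops the "no-prototype" attribute.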
+/// +//===----------------------------------------------------------------------===// + +#include "WebAssembly.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Operator.h" +#include "llvm/Transforms/Utils/ModuleUtils.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Pass.h" +#include "llvm/Support/Debug.h" +using namespace llvm; + +#define DEBUG_TYPE "wasm-add-missing-prototypes" + +namespace { +class WebAssemblyAddMissingPrototypes final : public ModulePass { + StringRef getPassName() const override { + return "Add prototypes to prototype-less functions"; + } + + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + ModulePass::getAnalysisUsage(AU); + } + + bool runOnModule(Module &M) override; + +public: + static char ID; + WebAssemblyAddMissingPrototypes() : ModulePass(ID) {} +}; +} // End anonymous namespace + +char WebAssemblyAddMissingPrototypes::ID = 0; +INITIALIZE_PASS(WebAssemblyAddMissingPrototypes, DEBUG_TYPE, + "Add prototypes to prototype-less functions", false, false) + +ModulePass *llvm::createWebAssemblyAddMissingPrototypes() { + return new WebAssemblyAddMissingPrototypes(); +} + +bool WebAssemblyAddMissingPrototypes::runOnModule(Module &M) { + LLVM_DEBUG(dbgs() << "running AddMissingPrototypes\n"); + + std::vector<std::pair<Function*, Function*>> Replacements; + + // Find all the prototype-less function declarations + for (Function &F : M) { + if (!F.isDeclaration() || !F.hasFnAttribute("no-prototype")) + continue; + + LLVM_DEBUG(dbgs() << "Found no-prototype function: " << F.getName() << "\n"); + + // When clang emits prototype-less C functions it uses (...), i.e. varargs + // functions that take no arguments (have no sentinel). When we see a + // no-prototype attribute we expect the function to have these properties. + if (!F.isVarArg()) + report_fatal_error( + "Functions with 'no-prototype' attribute must take varargs: " + + F.getName()); + if (F.getFunctionType()->getNumParams() != 0) + report_fatal_error( + "Functions with 'no-prototype' attribute should not have params: " + + F.getName()); + + + // Create a function prototype based on the first call site (first bitcast) + // that we find.
+ FunctionType *NewType = nullptr; + Function* NewF = nullptr; + for (Use &U : F.uses()) { + LLVM_DEBUG(dbgs() << "prototype-less use: " << F.getName() << "\n"); + if (BitCastOperator *BC = dyn_cast<BitCastOperator>(U.getUser())) { + FunctionType *DestType = + cast<FunctionType>(BC->getDestTy()->getPointerElementType()); + + // Create a new function with the correct type + NewType = DestType; + NewF = Function::Create(NewType, F.getLinkage(), F.getName()); + NewF->setAttributes(F.getAttributes()); + NewF->removeFnAttr("no-prototype"); + break; + } + } + + if (!NewType) { + LLVM_DEBUG( + dbgs() << "could not derive a function prototype from usage: " + + F.getName() + "\n"); + continue; + } + + for (Use &U : F.uses()) { + if (BitCastOperator *BC = dyn_cast<BitCastOperator>(U.getUser())) { + FunctionType *DestType = + cast<FunctionType>(BC->getDestTy()->getPointerElementType()); + if (NewType != DestType) { + report_fatal_error( + "Prototypeless function used with conflicting signatures: " + + F.getName()); + } + BC->replaceAllUsesWith(NewF); + Replacements.emplace_back(&F, NewF); + } else { + dbgs() << *U.getUser()->getType() << "\n"; +#ifndef NDEBUG + U.getUser()->dump(); +#endif + report_fatal_error( + "unexpected use of prototypeless function: " + F.getName() + "\n"); + } + } + } + + // Finally replace the old function declarations with the new ones + for (auto &Pair : Replacements) { + Function* Old = Pair.first; + Function* New = Pair.second; + Old->eraseFromParent(); + M.getFunctionList().push_back(New); + } + + return !Replacements.empty(); +} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp index 5fadca38b820..7c8a631cde8a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file moves ARGUMENT instructions after ScheduleDAG scheduling. +/// This file moves ARGUMENT instructions after ScheduleDAG scheduling. /// /// Arguments are really live-in registers, however, since we use virtual /// registers and LLVM doesn't support live-in virtual registers, we're @@ -60,12 +60,15 @@ public: } // end anonymous namespace char WebAssemblyArgumentMove::ID = 0; +INITIALIZE_PASS(WebAssemblyArgumentMove, DEBUG_TYPE, + "Move ARGUMENT instructions for WebAssembly", false, false) + FunctionPass *llvm::createWebAssemblyArgumentMove() { return new WebAssemblyArgumentMove(); } bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Argument Move **********\n" << "********** Function: " << MF.getName() << '\n'; }); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp index 204d97cbdd44..1f280e1d13fc 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains a printer that converts from our internal +/// This file contains a printer that converts from our internal /// representation of machine-dependent LLVM code to the WebAssembly assembly /// language. 
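The wasm.custom_sections handling added in the AsmPrinter hunk just below is driven by module-level named metadata. A minimal sketch of the producing side; the section name "producers" and its contents are illustrative, and any (name, contents) pair of strings works:

```cpp
// Builds the metadata shape that EmitEndOfAsmFile consumes: a named node
// "wasm.custom_sections" whose operands are (name, contents) MDString pairs.
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);

  NamedMDNode *Named = M.getOrInsertNamedMetadata("wasm.custom_sections");
  Metadata *Ops[] = {MDString::get(Ctx, "producers"),
                     MDString::get(Ctx, "example-frontend 1.0")};
  Named->addOperand(MDTuple::get(Ctx, Ops));

  // The AsmPrinter then copies the raw bytes "example-frontend 1.0" into a
  // Wasm section named ".custom_section.producers".
  return 0;
}
```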
/// @@ -31,10 +31,10 @@ #include "llvm/IR/DataLayout.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCSectionWasm.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbolWasm.h" -#include "llvm/MC/MCSymbolELF.h" #include "llvm/Support/Debug.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" @@ -53,7 +53,7 @@ MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const { MVT::v4i32, MVT::v4f32}) if (TRI->isTypeLegalForClass(*TRC, T)) return T; - DEBUG(errs() << "Unknown type for register number: " << RegNo); + LLVM_DEBUG(errs() << "Unknown type for register number: " << RegNo); llvm_unreachable("Unknown register type"); return MVT::Other; } @@ -84,21 +84,47 @@ void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) { SmallVector<MVT, 4> Results; SmallVector<MVT, 4> Params; ComputeSignatureVTs(F, TM, Params, Results); - getTargetStreamer()->emitIndirectFunctionType(getSymbol(&F), Params, - Results); + MCSymbol *Sym = getSymbol(&F); + getTargetStreamer()->emitIndirectFunctionType(Sym, Params, Results); + + if (TM.getTargetTriple().isOSBinFormatWasm() && + F.hasFnAttribute("wasm-import-module")) { + MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym); + StringRef Name = F.getFnAttribute("wasm-import-module") + .getValueAsString(); + getTargetStreamer()->emitImportModule(WasmSym, Name); + } } } for (const auto &G : M.globals()) { if (!G.hasInitializer() && G.hasExternalLinkage()) { if (G.getValueType()->isSized()) { uint16_t Size = M.getDataLayout().getTypeAllocSize(G.getValueType()); - if (TM.getTargetTriple().isOSBinFormatELF()) - getTargetStreamer()->emitGlobalImport(G.getGlobalIdentifier()); OutStreamer->emitELFSize(getSymbol(&G), MCConstantExpr::create(Size, OutContext)); } } } + + if (const NamedMDNode *Named = M.getNamedMetadata("wasm.custom_sections")) { + for (const Metadata *MD : Named->operands()) { + const MDTuple *Tuple = dyn_cast<MDTuple>(MD); + if (!Tuple || Tuple->getNumOperands() != 2) + continue; + const MDString *Name = dyn_cast<MDString>(Tuple->getOperand(0)); + const MDString *Contents = dyn_cast<MDString>(Tuple->getOperand(1)); + if (!Name || !Contents) + continue; + + OutStreamer->PushSection(); + std::string SectionName = (".custom_section." + Name->getString()).str(); + MCSectionWasm *mySection = + OutContext.getWasmSection(SectionName, SectionKind::getMetadata()); + OutStreamer->SwitchSection(mySection); + OutStreamer->EmitBytes(Contents->getString()); + OutStreamer->PopSection(); + } + } } void WebAssemblyAsmPrinter::EmitConstantPool() { @@ -133,36 +159,13 @@ void WebAssemblyAsmPrinter::EmitFunctionBodyStart() { else getTargetStreamer()->emitResult(CurrentFnSym, ArrayRef<MVT>()); - if (TM.getTargetTriple().isOSBinFormatELF()) { - assert(MFI->getLocals().empty()); - for (unsigned Idx = 0, IdxE = MRI->getNumVirtRegs(); Idx != IdxE; ++Idx) { - unsigned VReg = TargetRegisterInfo::index2VirtReg(Idx); - unsigned WAReg = MFI->getWAReg(VReg); - // Don't declare unused registers. - if (WAReg == WebAssemblyFunctionInfo::UnusedReg) - continue; - // Don't redeclare parameters. - if (WAReg < MFI->getParams().size()) - continue; - // Don't declare stackified registers. 
- if (int(WAReg) < 0) - continue; - MFI->addLocal(getRegType(VReg)); - } - } - getTargetStreamer()->emitLocal(MFI->getLocals()); AsmPrinter::EmitFunctionBodyStart(); } -void WebAssemblyAsmPrinter::EmitFunctionBodyEnd() { - if (TM.getTargetTriple().isOSBinFormatELF()) - getTargetStreamer()->emitEndFunc(); -} - void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) { - DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n'); + LLVM_DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n'); switch (MI->getOpcode()) { case WebAssembly::ARGUMENT_I32: diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h index a37f8bcf6ba5..23817b4e5126 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h @@ -57,7 +57,6 @@ public: void EmitJumpTableInfo() override; void EmitConstantPool() override; void EmitFunctionBodyStart() override; - void EmitFunctionBodyEnd() override; void EmitInstruction(const MachineInstr *MI) override; const MCExpr *lowerConstant(const Constant *CV) override; bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp index 700111743ee8..267a51433cd1 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a CFG sorting pass. +/// This file implements a CFG sorting pass. /// /// This pass reorders the blocks in a function to put them into topological /// order, ignoring loop backedges, and without any loop being interrupted @@ -56,6 +56,9 @@ public: } // end anonymous namespace char WebAssemblyCFGSort::ID = 0; +INITIALIZE_PASS(WebAssemblyCFGSort, DEBUG_TYPE, + "Reorders blocks in topological order", false, false) + FunctionPass *llvm::createWebAssemblyCFGSort() { return new WebAssemblyCFGSort(); } @@ -250,7 +253,7 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, assert(OnStack.count(MLI.getLoopFor(&MBB)) && "Blocks must be nested in their loops"); } - while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back())) + while (OnStack.size() > 1 && &MBB == WebAssembly::getBottom(OnStack.back())) OnStack.pop_back(); } assert(OnStack.pop_back_val() == nullptr && @@ -261,9 +264,9 @@ static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI, } bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** CFG Sorting **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** CFG Sorting **********\n" + "********** Function: " + << MF.getName() << '\n'); const auto &MLI = getAnalysis<MachineLoopInfo>(); auto &MDT = getAnalysis<MachineDominatorTree>(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp index 21e0f6b23777..70ce40cefed7 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a CFG stacking pass. 
+/// This file implements a CFG stacking pass. /// /// This pass inserts BLOCK and LOOP markers to mark the start of scopes, since /// scope boundaries serve as the labels for WebAssembly's control transfers. @@ -57,6 +57,10 @@ public: } // end anonymous namespace char WebAssemblyCFGStackify::ID = 0; +INITIALIZE_PASS(WebAssemblyCFGStackify, DEBUG_TYPE, + "Insert BLOCK and LOOP markers for WebAssembly scopes", + false, false) + FunctionPass *llvm::createWebAssemblyCFGStackify() { return new WebAssemblyCFGStackify(); } @@ -123,7 +127,8 @@ static void PlaceBlockMarker( // Decide where in Header to put the BLOCK. MachineBasicBlock::iterator InsertPos; MachineLoop *HeaderLoop = MLI.getLoopFor(Header); - if (HeaderLoop && MBB.getNumber() > LoopBottom(HeaderLoop)->getNumber()) { + if (HeaderLoop && + MBB.getNumber() > WebAssembly::getBottom(HeaderLoop)->getNumber()) { // Header is the header of a loop that does not lexically contain MBB, so // the BLOCK needs to be above the LOOP, after any END constructs. InsertPos = Header->begin(); @@ -143,9 +148,10 @@ static void PlaceBlockMarker( } // Add the BLOCK. - MachineInstr *Begin = BuildMI(*Header, InsertPos, DebugLoc(), - TII.get(WebAssembly::BLOCK)) - .addImm(int64_t(WebAssembly::ExprType::Void)); + MachineInstr *Begin = + BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos), + TII.get(WebAssembly::BLOCK)) + .addImm(int64_t(WebAssembly::ExprType::Void)); // Mark the end of the block. InsertPos = MBB.begin(); @@ -153,7 +159,7 @@ static void PlaceBlockMarker( InsertPos->getOpcode() == WebAssembly::END_LOOP && LoopTops[&*InsertPos]->getParent()->getNumber() >= Header->getNumber()) ++InsertPos; - MachineInstr *End = BuildMI(MBB, InsertPos, DebugLoc(), + MachineInstr *End = BuildMI(MBB, InsertPos, MBB.findPrevDebugLoc(InsertPos), TII.get(WebAssembly::END_BLOCK)); BlockTops[End] = Begin; @@ -176,7 +182,7 @@ static void PlaceLoopMarker( // The operand of a LOOP is the first block after the loop. If the loop is the // bottom of the function, insert a dummy block at the end. - MachineBasicBlock *Bottom = LoopBottom(Loop); + MachineBasicBlock *Bottom = WebAssembly::getBottom(Loop); auto Iter = std::next(MachineFunction::iterator(Bottom)); if (Iter == MF.end()) { MachineBasicBlock *Label = MF.CreateMachineBasicBlock(); @@ -193,12 +199,14 @@ static void PlaceLoopMarker( while (InsertPos != MBB.end() && InsertPos->getOpcode() == WebAssembly::END_LOOP) ++InsertPos; - MachineInstr *Begin = BuildMI(MBB, InsertPos, DebugLoc(), + MachineInstr *Begin = BuildMI(MBB, InsertPos, MBB.findDebugLoc(InsertPos), TII.get(WebAssembly::LOOP)) - .addImm(int64_t(WebAssembly::ExprType::Void)); + .addImm(int64_t(WebAssembly::ExprType::Void)); - // Mark the end of the loop. - MachineInstr *End = BuildMI(*AfterLoop, AfterLoop->begin(), DebugLoc(), + // Mark the end of the loop (using arbitrary debug location that branched + // to the loop end as its location). 
+ DebugLoc EndDL = (*AfterLoop->pred_rbegin())->findBranchDebugLoc(); + MachineInstr *End = BuildMI(*AfterLoop, AfterLoop->begin(), EndDL, TII.get(WebAssembly::END_LOOP)); LoopTops[End] = Begin; @@ -249,12 +257,13 @@ static void FixEndsAtEndOfFunction( case MVT::v8i16: retType = WebAssembly::ExprType::I16x8; break; case MVT::v4i32: retType = WebAssembly::ExprType::I32x4; break; case MVT::v4f32: retType = WebAssembly::ExprType::F32x4; break; + case MVT::ExceptRef: retType = WebAssembly::ExprType::ExceptRef; break; default: llvm_unreachable("unexpected return type"); } for (MachineBasicBlock &MBB : reverse(MF)) { for (MachineInstr &MI : reverse(MBB)) { - if (MI.isPosition() || MI.isDebugValue()) + if (MI.isPosition() || MI.isDebugInstr()) continue; if (MI.getOpcode() == WebAssembly::END_BLOCK) { BlockTops[&MI]->getOperand(0).setImm(int32_t(retType)); @@ -275,7 +284,8 @@ static void FixEndsAtEndOfFunction( static void AppendEndToFunction( MachineFunction &MF, const WebAssemblyInstrInfo &TII) { - BuildMI(MF.back(), MF.back().end(), DebugLoc(), + BuildMI(MF.back(), MF.back().end(), + MF.back().findPrevDebugLoc(MF.back().end()), TII.get(WebAssembly::END_FUNCTION)); } @@ -348,15 +358,13 @@ static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI, FixEndsAtEndOfFunction(MF, MFI, BlockTops, LoopTops); // Add an end instruction at the end of the function body. - if (!MF.getSubtarget<WebAssemblySubtarget>() - .getTargetTriple().isOSBinFormatELF()) - AppendEndToFunction(MF, TII); + AppendEndToFunction(MF, TII); } bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** CFG Stackifying **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** CFG Stackifying **********\n" + "********** Function: " + << MF.getName() << '\n'); const auto &MLI = getAnalysis<MachineLoopInfo>(); auto &MDT = getAnalysis<MachineDominatorTree>(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp index 1af92f02d8e0..c1820bf66bc0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file converts pseudo call_indirect instructions into real +/// This file converts pseudo call_indirect instructions into real /// call_indirects. 
/// /// The order of arguments for a call_indirect is the arguments to the function @@ -54,6 +54,9 @@ public: } // end anonymous namespace char WebAssemblyCallIndirectFixup::ID = 0; +INITIALIZE_PASS(WebAssemblyCallIndirectFixup, DEBUG_TYPE, + "Rewrite call_indirect argument orderings", false, false) + FunctionPass *llvm::createWebAssemblyCallIndirectFixup() { return new WebAssemblyCallIndirectFixup(); } @@ -80,8 +83,8 @@ static bool IsPseudoCallIndirect(const MachineInstr &MI) { } bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** Fixing up CALL_INDIRECTs **********\n" - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Fixing up CALL_INDIRECTs **********\n" + << MF.getName() << '\n'); bool Changed = false; const WebAssemblyInstrInfo *TII = @@ -90,7 +93,7 @@ bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) { for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { if (IsPseudoCallIndirect(MI)) { - DEBUG(dbgs() << "Found call_indirect: " << MI << '\n'); + LLVM_DEBUG(dbgs() << "Found call_indirect: " << MI << '\n'); // Rewrite pseudo to non-pseudo const MCInstrDesc &Desc = TII->get(GetNonPseudoCallIndirectOpcode(MI)); @@ -120,13 +123,13 @@ bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) { for (const MachineOperand &MO : Ops) MI.addOperand(MO); - DEBUG(dbgs() << " After transform: " << MI); + LLVM_DEBUG(dbgs() << " After transform: " << MI); Changed = true; } } } - DEBUG(dbgs() << "\nDone fixing up CALL_INDIRECTs\n\n"); + LLVM_DEBUG(dbgs() << "\nDone fixing up CALL_INDIRECTs\n\n"); return Changed; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp new file mode 100644 index 000000000000..84683d48a90a --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp @@ -0,0 +1,197 @@ +//===--- WebAssemblyExceptionInfo.cpp - Exception Information -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// \brief This file implements WebAssemblyException information analysis.
+/// +//===----------------------------------------------------------------------===// + +#include "WebAssemblyExceptionInfo.h" +#include "WebAssemblyUtilities.h" +#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/CodeGen/MachineDominanceFrontier.h" +#include "llvm/CodeGen/MachineDominators.h" + +using namespace llvm; + +#define DEBUG_TYPE "wasm-exception-info" + +char WebAssemblyExceptionInfo::ID = 0; + +INITIALIZE_PASS_BEGIN(WebAssemblyExceptionInfo, DEBUG_TYPE, + "WebAssembly Exception Information", true, true) +INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) +INITIALIZE_PASS_DEPENDENCY(MachineDominanceFrontier) +INITIALIZE_PASS_END(WebAssemblyExceptionInfo, DEBUG_TYPE, + "WebAssembly Exception Information", true, true) + +bool WebAssemblyExceptionInfo::runOnMachineFunction(MachineFunction &F) { + releaseMemory(); + auto &MDT = getAnalysis<MachineDominatorTree>(); + auto &MDF = getAnalysis<MachineDominanceFrontier>(); + recalculate(MDT, MDF); + return false; +} + +void WebAssemblyExceptionInfo::recalculate( + MachineDominatorTree &MDT, const MachineDominanceFrontier &MDF) { + // Postorder traversal of the dominator tree. + SmallVector<WebAssemblyException *, 8> Exceptions; + for (auto DomNode : post_order(&MDT)) { + MachineBasicBlock *EHPad = DomNode->getBlock(); + if (!EHPad->isEHPad()) + continue; + // We group catch & catch-all terminate pads together, so skip the second + // one + if (WebAssembly::isCatchAllTerminatePad(*EHPad)) + continue; + auto *WE = new WebAssemblyException(EHPad); + discoverAndMapException(WE, MDT, MDF); + Exceptions.push_back(WE); + } + + // Add BBs to exceptions + for (auto DomNode : post_order(&MDT)) { + MachineBasicBlock *MBB = DomNode->getBlock(); + WebAssemblyException *WE = getExceptionFor(MBB); + for (; WE; WE = WE->getParentException()) + WE->addBlock(MBB); + } + + // Add subexceptions to exceptions + for (auto *WE : Exceptions) { + if (WE->getParentException()) + WE->getParentException()->getSubExceptions().push_back(WE); + else + addTopLevelException(WE); + } + + // For convenience, Blocks and SubExceptions are inserted in postorder. + // Reverse the lists. + for (auto *WE : Exceptions) { + WE->reverseBlock(); + std::reverse(WE->getSubExceptions().begin(), WE->getSubExceptions().end()); + } +} + +void WebAssemblyExceptionInfo::releaseMemory() { + BBMap.clear(); + DeleteContainerPointers(TopLevelExceptions); + TopLevelExceptions.clear(); +} + +void WebAssemblyExceptionInfo::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + AU.addRequired<MachineDominatorTree>(); + AU.addRequired<MachineDominanceFrontier>(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +void WebAssemblyExceptionInfo::discoverAndMapException( + WebAssemblyException *WE, const MachineDominatorTree &MDT, + const MachineDominanceFrontier &MDF) { + unsigned NumBlocks = 0; + unsigned NumSubExceptions = 0; + + // Map blocks that belong to a catchpad / cleanuppad + MachineBasicBlock *EHPad = WE->getEHPad(); + + // We group catch & catch-all terminate pads together within an exception + if (WebAssembly::isCatchTerminatePad(*EHPad)) { + assert(EHPad->succ_size() == 1 && + "Catch terminate pad has more than one successors"); + changeExceptionFor(EHPad, WE); + changeExceptionFor(*(EHPad->succ_begin()), WE); + return; + } + + SmallVector<MachineBasicBlock *, 8> WL; + WL.push_back(EHPad); + while (!WL.empty()) { + MachineBasicBlock *MBB = WL.pop_back_val(); + + // Find its outermost discovered exception. 
If this is a discovered block, + // check if it is already discovered to be a subexception of this exception. + WebAssemblyException *SubE = getOutermostException(MBB); + if (SubE) { + if (SubE != WE) { + // Discover a subexception of this exception. + SubE->setParentException(WE); + ++NumSubExceptions; + NumBlocks += SubE->getBlocksVector().capacity(); + // All blocks that belong to this subexception have been already + // discovered. Skip all of them. Add the subexception's landing pad's + // dominance frontier to the worklist. + for (auto &Frontier : MDF.find(SubE->getEHPad())->second) + if (MDT.dominates(EHPad, Frontier)) + WL.push_back(Frontier); + } + continue; + } + + // This is an undiscovered block. Map it to the current exception. + changeExceptionFor(MBB, WE); + ++NumBlocks; + + // Add successors dominated by the current BB to the worklist. + for (auto *Succ : MBB->successors()) + if (MDT.dominates(EHPad, Succ)) + WL.push_back(Succ); + } + + WE->getSubExceptions().reserve(NumSubExceptions); + WE->reserveBlocks(NumBlocks); +} + +WebAssemblyException * +WebAssemblyExceptionInfo::getOutermostException(MachineBasicBlock *MBB) const { + WebAssemblyException *WE = getExceptionFor(MBB); + if (WE) { + while (WebAssemblyException *Parent = WE->getParentException()) + WE = Parent; + } + return WE; +} + +void WebAssemblyException::print(raw_ostream &OS, unsigned Depth) const { + OS.indent(Depth * 2) << "Exception at depth " << getExceptionDepth() + << " containing: "; + + for (unsigned I = 0; I < getBlocks().size(); ++I) { + MachineBasicBlock *MBB = getBlocks()[I]; + if (I) + OS << ", "; + OS << "%bb." << MBB->getNumber(); + if (const auto *BB = MBB->getBasicBlock()) + if (BB->hasName()) + OS << "." << BB->getName(); + + if (getEHPad() == MBB) + OS << " (landing-pad)"; + } + OS << "\n"; + + for (auto &SubE : SubExceptions) + SubE->print(OS, Depth + 2); +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +LLVM_DUMP_METHOD void WebAssemblyException::dump() const { print(dbgs()); } +#endif + +raw_ostream &operator<<(raw_ostream &OS, const WebAssemblyException &WE) { + WE.print(OS); + return OS; +} + +void WebAssemblyExceptionInfo::print(raw_ostream &OS, const Module *) const { + for (auto *WE : TopLevelExceptions) + WE->print(OS); +} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h new file mode 100644 index 000000000000..fcd7e2366e03 --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExceptionInfo.h @@ -0,0 +1,170 @@ +//===-- WebAssemblyExceptionInfo.h - WebAssembly Exception Info -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// \brief This file implements WebAssemblyException information analysis. 
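For context, a minimal sketch of how a later machine pass could consume this analysis through the interface declared in the header below. "MyWasmPass" is a made-up name; only the WebAssemblyExceptionInfo API introduced by this patch is used, and real clients would also need the usual pass registration.

```cpp
#include "WebAssemblyExceptionInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
using namespace llvm;

namespace {
class MyWasmPass final : public MachineFunctionPass {
public:
  static char ID;
  MyWasmPass() : MachineFunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<WebAssemblyExceptionInfo>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
    for (MachineBasicBlock &MBB : MF) {
      // Innermost enclosing exception, or null if MBB is not in a catch part.
      if (WebAssemblyException *WE = WEI.getExceptionFor(&MBB))
        (void)WE->getExceptionDepth(); // e.g. group blocks by nesting depth
    }
    return false;
  }
};
} // end anonymous namespace

char MyWasmPass::ID = 0;
```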
+/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYEXCEPTIONINFO_H +#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYEXCEPTIONINFO_H + +#include "WebAssembly.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/CodeGen/MachineFunctionPass.h" + +namespace llvm { + +class MachineDominatorTree; +class MachineDominanceFrontier; + +// WebAssembly instructions for exception handling are structured as follows: +// try +// instructions* +// catch ----| +// instructions* | -> A WebAssemblyException consists of this region +// end ----| +// +// A WebAssemblyException object contains BBs that belong to a 'catch' part of +// the try-catch-end structure to be created later. 'try' and 'end' markers +// are not present at this stage and will be generated in CFGStackify pass. +// Because CFGSort requires all the BBs within a catch part to be sorted +// together as it does for loops, this pass calculates the nesting structure of +// catch part of exceptions in a function. +// +// An exception catch part is defined as a BB with catch instruction and all +// other BBs dominated by this BB. +class WebAssemblyException { + MachineBasicBlock *EHPad = nullptr; + + WebAssemblyException *ParentException = nullptr; + std::vector<WebAssemblyException *> SubExceptions; + std::vector<MachineBasicBlock *> Blocks; + SmallPtrSet<const MachineBasicBlock *, 8> BlockSet; + +public: + WebAssemblyException(MachineBasicBlock *EHPad) : EHPad(EHPad) {} + ~WebAssemblyException() { DeleteContainerPointers(SubExceptions); } + WebAssemblyException(const WebAssemblyException &) = delete; + const WebAssemblyException &operator=(const WebAssemblyException &) = delete; + + MachineBasicBlock *getEHPad() const { return EHPad; } + MachineBasicBlock *getHeader() const { return EHPad; } + WebAssemblyException *getParentException() const { return ParentException; } + void setParentException(WebAssemblyException *WE) { ParentException = WE; } + + bool contains(const WebAssemblyException *WE) const { + if (WE == this) + return true; + if (!WE) + return false; + return contains(WE->getParentException()); + } + bool contains(const MachineBasicBlock *MBB) const { + return BlockSet.count(MBB); + } + + void addBlock(MachineBasicBlock *MBB) { + Blocks.push_back(MBB); + BlockSet.insert(MBB); + } + ArrayRef<MachineBasicBlock *> getBlocks() const { return Blocks; } + using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator; + block_iterator block_begin() const { return getBlocks().begin(); } + block_iterator block_end() const { return getBlocks().end(); } + inline iterator_range<block_iterator> blocks() const { + return make_range(block_begin(), block_end()); + } + unsigned getNumBlocks() const { return Blocks.size(); } + std::vector<MachineBasicBlock *> &getBlocksVector() { return Blocks; } + + const std::vector<WebAssemblyException *> &getSubExceptions() const { + return SubExceptions; + } + std::vector<WebAssemblyException *> &getSubExceptions() { + return SubExceptions; + } + void addSubException(WebAssemblyException *E) { SubExceptions.push_back(E); } + using iterator = typename std::vector<WebAssemblyException *>::const_iterator; + iterator begin() const { return SubExceptions.begin(); } + iterator end() const { return SubExceptions.end(); } + + void reserveBlocks(unsigned Size) { Blocks.reserve(Size); } + void reverseBlock(unsigned From = 0) { + std::reverse(Blocks.begin() + From, Blocks.end()); + } + + // Return the nesting level. 
An outermost one has depth 1. + unsigned getExceptionDepth() const { + unsigned D = 1; + for (const WebAssemblyException *CurException = ParentException; + CurException; CurException = CurException->ParentException) + ++D; + return D; + } + + void print(raw_ostream &OS, unsigned Depth = 0) const; + void dump() const; +}; + +raw_ostream &operator<<(raw_ostream &OS, const WebAssemblyException &WE); + +class WebAssemblyExceptionInfo final : public MachineFunctionPass { + // Mapping of basic blocks to the innermost exception they occur in + DenseMap<const MachineBasicBlock *, WebAssemblyException *> BBMap; + std::vector<WebAssemblyException *> TopLevelExceptions; + + void discoverAndMapException(WebAssemblyException *WE, + const MachineDominatorTree &MDT, + const MachineDominanceFrontier &MDF); + WebAssemblyException *getOutermostException(MachineBasicBlock *MBB) const; + +public: + static char ID; + WebAssemblyExceptionInfo() : MachineFunctionPass(ID) { + initializeWebAssemblyExceptionInfoPass(*PassRegistry::getPassRegistry()); + } + ~WebAssemblyExceptionInfo() override { releaseMemory(); } + WebAssemblyExceptionInfo(const WebAssemblyExceptionInfo &) = delete; + WebAssemblyExceptionInfo & + operator=(const WebAssemblyExceptionInfo &) = delete; + + bool runOnMachineFunction(MachineFunction &) override; + void releaseMemory() override; + void recalculate(MachineDominatorTree &MDT, + const MachineDominanceFrontier &MDF); + void getAnalysisUsage(AnalysisUsage &AU) const override; + + bool empty() const { return TopLevelExceptions.empty(); } + + // Return the innermost exception that MBB lives in. If the block is not in an + // exception, null is returned. + WebAssemblyException *getExceptionFor(const MachineBasicBlock *MBB) const { + return BBMap.lookup(MBB); + } + + void changeExceptionFor(MachineBasicBlock *MBB, WebAssemblyException *WE) { + if (!WE) { + BBMap.erase(MBB); + return; + } + BBMap[MBB] = WE; + } + + void addTopLevelException(WebAssemblyException *WE) { + assert(!WE->getParentException() && "Not a top level exception!"); + TopLevelExceptions.push_back(WE); + } + + void print(raw_ostream &OS, const Module *M = nullptr) const override; +}; + +} // end namespace llvm + +#endif diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp index e2edb924d4d2..8619cbdcb5ee 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file converts any remaining registers into WebAssembly locals. +/// This file converts any remaining registers into WebAssembly locals. 
/// /// After register stackification and register coloring, convert non-stackified /// registers into locals, inserting explicit get_local and set_local @@ -60,6 +60,9 @@ public: } // end anonymous namespace char WebAssemblyExplicitLocals::ID = 0; +INITIALIZE_PASS(WebAssemblyExplicitLocals, DEBUG_TYPE, + "Convert registers to WebAssembly locals", false, false) + FunctionPass *llvm::createWebAssemblyExplicitLocals() { return new WebAssemblyExplicitLocals(); } @@ -86,6 +89,8 @@ static unsigned getDropOpcode(const TargetRegisterClass *RC) { return WebAssembly::DROP_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::DROP_V128; + if (RC == &WebAssembly::EXCEPT_REFRegClass) + return WebAssembly::DROP_EXCEPT_REF; llvm_unreachable("Unexpected register class"); } @@ -101,6 +106,8 @@ static unsigned getGetLocalOpcode(const TargetRegisterClass *RC) { return WebAssembly::GET_LOCAL_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::GET_LOCAL_V128; + if (RC == &WebAssembly::EXCEPT_REFRegClass) + return WebAssembly::GET_LOCAL_EXCEPT_REF; llvm_unreachable("Unexpected register class"); } @@ -116,6 +123,8 @@ static unsigned getSetLocalOpcode(const TargetRegisterClass *RC) { return WebAssembly::SET_LOCAL_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::SET_LOCAL_V128; + if (RC == &WebAssembly::EXCEPT_REFRegClass) + return WebAssembly::SET_LOCAL_EXCEPT_REF; llvm_unreachable("Unexpected register class"); } @@ -131,6 +140,8 @@ static unsigned getTeeLocalOpcode(const TargetRegisterClass *RC) { return WebAssembly::TEE_LOCAL_F64; if (RC == &WebAssembly::V128RegClass) return WebAssembly::TEE_LOCAL_V128; + if (RC == &WebAssembly::EXCEPT_REFRegClass) + return WebAssembly::TEE_LOCAL_EXCEPT_REF; llvm_unreachable("Unexpected register class"); } @@ -144,6 +155,8 @@ static MVT typeForRegClass(const TargetRegisterClass *RC) { return MVT::f32; if (RC == &WebAssembly::F64RegClass) return MVT::f64; + if (RC == &WebAssembly::EXCEPT_REFRegClass) + return MVT::ExceptRef; llvm_unreachable("unrecognized register class"); } @@ -168,19 +181,14 @@ static MachineInstr *FindStartOfTree(MachineOperand &MO, } bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** Make Locals Explicit **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Make Locals Explicit **********\n" + "********** Function: " + << MF.getName() << '\n'); // Disable this pass if directed to do so. if (DisableWebAssemblyExplicitLocals) return false; - // Disable this pass if we aren't doing direct wasm object emission. - if (MF.getSubtarget<WebAssemblySubtarget>() - .getTargetTriple().isOSBinFormatELF()) - return false; - bool Changed = false; MachineRegisterInfo &MRI = MF.getRegInfo(); WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>(); @@ -218,7 +226,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { MachineInstr &MI = *I++; assert(!WebAssembly::isArgument(MI)); - if (MI.isDebugValue() || MI.isLabel()) + if (MI.isDebugInstr() || MI.isLabel()) continue; // Replace tee instructions with tee_local. 
The difference is that tee @@ -271,8 +279,11 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { } if (UseEmpty[TargetRegisterInfo::virtReg2Index(OldReg)]) { unsigned Opc = getDropOpcode(RC); - BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc)) - .addReg(NewReg); + MachineInstr *Drop = + BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc)) + .addReg(NewReg); + // After the drop instruction, this reg operand will not be used + Drop->getOperand(0).setIsKill(); } else { unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg); unsigned Opc = getSetLocalOpcode(RC); @@ -281,6 +292,9 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { .addReg(NewReg); } MI.getOperand(0).setReg(NewReg); + // This register operand is now being used by the inserted drop + // instruction, so make it undead. + MI.getOperand(0).setIsDead(false); MFI.stackifyVReg(NewReg); Changed = true; } @@ -362,7 +376,7 @@ bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) { // Assert that all registers have been stackified at this point. for (const MachineBasicBlock &MBB : MF) { for (const MachineInstr &MI : MBB) { - if (MI.isDebugValue() || MI.isLabel()) + if (MI.isDebugInstr() || MI.isLabel()) continue; for (const MachineOperand &MO : MI.explicit_operands()) { assert( diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp index 7e284ea950fd..566ef68c027d 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly-specific support for the FastISel +/// This file defines the WebAssembly-specific support for the FastISel /// class. Some of the target-specific code is generated by tablegen in the file /// WebAssemblyGenFastISel.inc, which is #included here. 
/// @@ -127,6 +127,7 @@ private: case MVT::i64: case MVT::f32: case MVT::f64: + case MVT::ExceptRef: return VT; case MVT::f16: return MVT::f32; @@ -418,7 +419,7 @@ unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V, bool &Not) { return getRegForValue(ICmp->getOperand(0)); } - if (BinaryOperator::isNot(V)) { + if (BinaryOperator::isNot(V) && V->getType()->isIntegerTy(32)) { Not = true; return getRegForValue(BinaryOperator::getNotArgument(V)); } @@ -681,6 +682,10 @@ bool WebAssemblyFastISel::fastLowerArguments() { Opc = WebAssembly::ARGUMENT_v4f32; RC = &WebAssembly::V128RegClass; break; + case MVT::ExceptRef: + Opc = WebAssembly::ARGUMENT_EXCEPT_REF; + RC = &WebAssembly::EXCEPT_REFRegClass; + break; default: return false; } @@ -695,11 +700,23 @@ bool WebAssemblyFastISel::fastLowerArguments() { MRI.addLiveIn(WebAssembly::ARGUMENTS); auto *MFI = MF->getInfo<WebAssemblyFunctionInfo>(); - for (auto const &Arg : F->args()) - MFI->addParam(getLegalType(getSimpleType(Arg.getType()))); + for (auto const &Arg : F->args()) { + MVT::SimpleValueType ArgTy = getLegalType(getSimpleType(Arg.getType())); + if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE) { + MFI->clearParamsAndResults(); + return false; + } + MFI->addParam(ArgTy); + } - if (!F->getReturnType()->isVoidTy()) - MFI->addResult(getLegalType(getSimpleType(F->getReturnType()))); + if (!F->getReturnType()->isVoidTy()) { + MVT::SimpleValueType RetTy = getLegalType(getSimpleType(F->getReturnType())); + if (RetTy == MVT::INVALID_SIMPLE_VALUE_TYPE) { + MFI->clearParamsAndResults(); + return false; + } + MFI->addResult(RetTy); + } return true; } @@ -770,6 +787,11 @@ bool WebAssemblyFastISel::selectCall(const Instruction *I) { IsDirect ? WebAssembly::CALL_v4f32 : WebAssembly::PCALL_INDIRECT_v4f32; ResultReg = createResultReg(&WebAssembly::V128RegClass); break; + case MVT::ExceptRef: + Opc = IsDirect ? WebAssembly::CALL_EXCEPT_REF + : WebAssembly::PCALL_INDIRECT_EXCEPT_REF; + ResultReg = createResultReg(&WebAssembly::EXCEPT_REFRegClass); + break; default: return false; } @@ -868,6 +890,10 @@ bool WebAssemblyFastISel::selectSelect(const Instruction *I) { Opc = WebAssembly::SELECT_F64; RC = &WebAssembly::F64RegClass; break; + case MVT::ExceptRef: + Opc = WebAssembly::SELECT_EXCEPT_REF; + RC = &WebAssembly::EXCEPT_REFRegClass; + break; default: return false; } @@ -1165,6 +1191,7 @@ bool WebAssemblyFastISel::selectStore(const Instruction *I) { switch (getSimpleType(Store->getValueOperand()->getType())) { case MVT::i1: VTIsi1 = true; + LLVM_FALLTHROUGH; case MVT::i8: Opc = WebAssembly::STORE8_I32; break; @@ -1273,6 +1300,9 @@ bool WebAssemblyFastISel::selectRet(const Instruction *I) { case MVT::v4f32: Opc = WebAssembly::RETURN_v4f32; break; + case MVT::ExceptRef: + Opc = WebAssembly::RETURN_EXCEPT_REF; + break; default: return false; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp index 666337acccce..d5e47ee82513 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Fix bitcasted functions. +/// Fix bitcasted functions. /// /// WebAssembly requires caller and callee signatures to match, however in LLVM, /// some amount of slop is vaguely permitted. 
Detect mismatch by looking for @@ -61,6 +61,9 @@ public: } // End anonymous namespace char FixFunctionBitcasts::ID = 0; +INITIALIZE_PASS(FixFunctionBitcasts, DEBUG_TYPE, + "Fix mismatching bitcasts for WebAssembly", false, false) + ModulePass *llvm::createWebAssemblyFixFunctionBitcasts() { return new FixFunctionBitcasts(); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp index 88daea7e3681..bea027be7711 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a pass that transforms irreducible control flow +/// This file implements a pass that transforms irreducible control flow /// into reducible control flow. Irreducible control flow means multiple-entry /// loops; they appear as CFG cycles that are not recorded in MachineLoopInfo /// due to being unnatural. @@ -71,6 +71,9 @@ public: } // end anonymous namespace char WebAssemblyFixIrreducibleControlFlow::ID = 0; +INITIALIZE_PASS(WebAssemblyFixIrreducibleControlFlow, DEBUG_TYPE, + "Removes irreducible control flow", false, false) + FunctionPass *llvm::createWebAssemblyFixIrreducibleControlFlow() { return new WebAssemblyFixIrreducibleControlFlow(); } @@ -136,7 +139,7 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF, MachineBasicBlock *Header = Loop ? Loop->getHeader() : &*MF.begin(); SetVector<MachineBasicBlock *> RewriteSuccs; - // DFS through Loop's body, looking for for irreducible control flow. Loop is + // DFS through Loop's body, looking for irreducible control flow. Loop is // natural, and we stay in its body, and we treat any nested loops // monolithically, so any cycles we encounter indicate irreducibility. SmallPtrSet<MachineBasicBlock *, 8> OnStack; @@ -174,7 +177,7 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF, if (LLVM_LIKELY(RewriteSuccs.empty())) return false; - DEBUG(dbgs() << "Irreducible control flow detected!\n"); + LLVM_DEBUG(dbgs() << "Irreducible control flow detected!\n"); // Ok. We have irreducible control flow! Create a dispatch block which will // contains a jump table to any block in the problematic set of blocks. @@ -205,7 +208,8 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF, continue; unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1; - DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index << "\n"); + LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index + << "\n"); Pair.first->second = Index; for (auto Pred : MBB->predecessors()) @@ -264,9 +268,9 @@ bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF, bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction( MachineFunction &MF) { - DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n" + "********** Function: " + << MF.getName() << '\n'); bool Changed = false; auto &MLI = getAnalysis<MachineLoopInfo>(); @@ -284,7 +288,7 @@ bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction( // If we made any changes, completely recompute everything. 
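As a rough source-level illustration of the rewrite described above (a hypothetical C++ sketch, not output of the pass): a cycle with two distinct entry points is irreducible, and funnelling every entry through a single dispatch switch on an index variable, which is what the new dispatch block and its jump table do at the MachineBasicBlock level, makes the region reducible again.

// Hypothetical "before": two entry points jump into the same cycle.
int beforeRewrite(bool FromA, int N) {
  int Sum = 0;
  if (FromA)
    goto A;
  goto B;
A:
  Sum += 1;
  if (--N > 0)
    goto B;
  return Sum;
B:
  Sum += 2;
  if (--N > 0)
    goto A;
  return Sum;
}

// Hypothetical "after": one dispatch loop; Index plays the role of the
// register fed to the jump table in the dispatch block.
int afterRewrite(bool FromA, int N) {
  int Sum = 0;
  int Index = FromA ? 0 : 1;
  for (;;) {
    switch (Index) {
    case 0:
      Sum += 1;
      if (--N <= 0)
        return Sum;
      Index = 1;
      break;
    case 1:
      Sum += 2;
      if (--N <= 0)
        return Sum;
      Index = 0;
      break;
    }
  }
}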
if (LLVM_UNLIKELY(Changed)) { - DEBUG(dbgs() << "Recomputing dominators and loops.\n"); + LLVM_DEBUG(dbgs() << "Recomputing dominators and loops.\n"); MF.getRegInfo().invalidateLiveness(); MF.RenumberBlocks(); getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp index 84246052f601..052c94e9d6a9 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of +/// This file contains the WebAssembly implementation of /// TargetFrameLowering class. /// /// On WebAssembly, there aren't a lot of things to do here. There are no @@ -106,29 +106,9 @@ static void writeSPToMemory(unsigned SrcReg, MachineFunction &MF, const char *ES = "__stack_pointer"; auto *SPSymbol = MF.createExternalSymbolName(ES); - if (MF.getSubtarget<WebAssemblySubtarget>() - .getTargetTriple().isOSBinFormatELF()) { - MachineRegisterInfo &MRI = MF.getRegInfo(); - const TargetRegisterClass *PtrRC = - MRI.getTargetRegisterInfo()->getPointerRegClass(MF); - unsigned Zero = MRI.createVirtualRegister(PtrRC); - - BuildMI(MBB, InsertAddr, DL, TII->get(WebAssembly::CONST_I32), Zero) - .addImm(0); - MachineMemOperand *MMO = MF.getMachineMemOperand( - MachinePointerInfo(MF.getPSVManager().getExternalSymbolCallEntry(ES)), - MachineMemOperand::MOStore, 4, 4); - BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::STORE_I32)) - .addImm(2) // p2align - .addExternalSymbol(SPSymbol) - .addReg(Zero) - .addReg(SrcReg) - .addMemOperand(MMO); - } else { - BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::SET_GLOBAL_I32)) - .addExternalSymbol(SPSymbol) - .addReg(SrcReg); - } + BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::SET_GLOBAL_I32)) + .addExternalSymbol(SPSymbol) + .addReg(SrcReg); } MachineBasicBlock::iterator @@ -172,25 +152,8 @@ void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF, const char *ES = "__stack_pointer"; auto *SPSymbol = MF.createExternalSymbolName(ES); - if (MF.getSubtarget<WebAssemblySubtarget>() - .getTargetTriple().isOSBinFormatELF()) { - unsigned Zero = MRI.createVirtualRegister(PtrRC); - - BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), Zero) - .addImm(0); - MachineMemOperand *LoadMMO = MF.getMachineMemOperand( - MachinePointerInfo(MF.getPSVManager().getExternalSymbolCallEntry(ES)), - MachineMemOperand::MOLoad, 4, 4); - // Load the SP value. 
- BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::LOAD_I32), SPReg) - .addImm(2) // p2align - .addExternalSymbol(SPSymbol) - .addReg(Zero) // addr - .addMemOperand(LoadMMO); - } else { - BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GET_GLOBAL_I32), SPReg) - .addExternalSymbol(SPSymbol); - } + BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GET_GLOBAL_I32), SPReg) + .addExternalSymbol(SPSymbol); bool HasBP = hasBP(MF); if (HasBP) { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h index 4cc7f5ae058a..fe23e418a3f1 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This class implements WebAssembly-specific bits of +/// This class implements WebAssembly-specific bits of /// TargetFrameLowering class. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def index 2f0f106ef5b7..c12550feabbb 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file describes the various WebAssembly ISD node types. +/// This file describes the various WebAssembly ISD node types. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp index 9f40d35689a5..fdf3a30a5c0e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines an instruction selector for the WebAssembly target. +/// This file defines an instruction selector for the WebAssembly target. /// //===----------------------------------------------------------------------===// @@ -68,27 +68,21 @@ private: } // end anonymous namespace void WebAssemblyDAGToDAGISel::Select(SDNode *Node) { - // Dump information about the Node being selected. - DEBUG(errs() << "Selecting: "); - DEBUG(Node->dump(CurDAG)); - DEBUG(errs() << "\n"); - // If we have a custom node, we already have selected! if (Node->isMachineOpcode()) { - DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); + LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); Node->setNodeId(-1); return; } - // Few custom selection stuff. - EVT VT = Node->getValueType(0); - + // Few custom selection stuff. If we need WebAssembly-specific selection, + // uncomment this block add corresponding case statements. + /* switch (Node->getOpcode()) { default: break; - // If we need WebAssembly-specific selection, it would go here. - (void)VT; } + */ // Select the default instruction. 
SelectCode(Node); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index 299009fa6674..283e703e1f6c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblyTargetLowering class. +/// This file implements the WebAssemblyTargetLowering class. /// //===----------------------------------------------------------------------===// @@ -117,8 +117,7 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( // As a special case, these operators use the type to mean the type to // sign-extend from. setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - if (!Subtarget->hasAtomics()) { - // The Atomics feature includes signext intructions. + if (!Subtarget->hasSignExt()) { for (auto T : {MVT::i8, MVT::i16, MVT::i32}) setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand); } @@ -152,6 +151,9 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( // Trap lowers to wasm unreachable setOperationAction(ISD::TRAP, MVT::Other, Legal); + // Exception handling intrinsics + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); + setMaxAtomicSizeInBitsSupported(64); } @@ -427,6 +429,15 @@ bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT, return true; } +EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL, + LLVMContext &C, + EVT VT) const { + if (VT.isVector()) + return VT.changeVectorElementTypeToInteger(); + + return TargetLowering::getSetCCResultType(DL, C, VT); +} + //===----------------------------------------------------------------------===// // WebAssembly Lowering private implementation. //===----------------------------------------------------------------------===// @@ -485,6 +496,7 @@ SDValue WebAssemblyTargetLowering::LowerCall( SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; + unsigned NumFixedArgs = 0; for (unsigned i = 0; i < Outs.size(); ++i) { const ISD::OutputArg &Out = Outs[i]; SDValue &OutVal = OutVals[i]; @@ -510,11 +522,11 @@ SDValue WebAssemblyTargetLowering::LowerCall( /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo()); OutVal = FINode; } + // Count the number of fixed args *after* legalization. + NumFixedArgs += Out.IsFixed; } bool IsVarArg = CLI.IsVarArg; - unsigned NumFixedArgs = CLI.NumFixedArgs; - auto PtrVT = getPointerTy(Layout); // Analyze operands of the call, assigning locations to each operand. @@ -738,6 +750,8 @@ SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op, return LowerFRAMEADDR(Op, DAG); case ISD::CopyToReg: return LowerCopyToReg(Op, DAG); + case ISD::INTRINSIC_WO_CHAIN: + return LowerINTRINSIC_WO_CHAIN(Op, DAG); } } @@ -870,6 +884,21 @@ SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op, MachinePointerInfo(SV), 0); } +SDValue +WebAssemblyTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); + SDLoc DL(Op); + switch (IntNo) { + default: + return {}; // Don't custom lower most intrinsics. 
+ + case Intrinsic::wasm_lsda: + // TODO For now, just return 0 not to crash + return DAG.getConstant(0, DL, Op.getValueType()); + } +} + //===----------------------------------------------------------------------===// // WebAssembly Optimization Hooks //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h index 7bb8e71ab974..79819493ac6a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the interfaces that WebAssembly uses to lower LLVM +/// This file defines the interfaces that WebAssembly uses to lower LLVM /// code into a selection DAG. /// //===----------------------------------------------------------------------===// @@ -64,6 +64,9 @@ class WebAssemblyTargetLowering final : public TargetLowering { bool *Fast) const override; bool isIntDivCheap(EVT VT, AttributeList Attr) const override; + EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, + EVT VT) const override; + SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const override; bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, @@ -90,6 +93,7 @@ class WebAssemblyTargetLowering final : public TargetLowering { SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; }; namespace WebAssembly { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td index a49172df158f..d879932b3232 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Atomic operand code-gen constructs. +/// WebAssembly Atomic operand code-gen constructs. /// //===----------------------------------------------------------------------===// @@ -17,8 +17,8 @@ //===----------------------------------------------------------------------===// let Defs = [ARGUMENTS] in { -def ATOMIC_LOAD_I32 : WebAssemblyLoad<I32, "i32.atomic.load", 0xfe10>; -def ATOMIC_LOAD_I64 : WebAssemblyLoad<I64, "i64.atomic.load", 0xfe11>; +defm ATOMIC_LOAD_I32 : WebAssemblyLoad<I32, "i32.atomic.load", 0xfe10>; +defm ATOMIC_LOAD_I64 : WebAssemblyLoad<I64, "i64.atomic.load", 0xfe11>; } // Defs = [ARGUMENTS] // Select loads with no constant offset. @@ -40,7 +40,6 @@ def : LoadPatGlobalAddr<i64, atomic_load_64, ATOMIC_LOAD_I64>; def : LoadPatExternalSym<i32, atomic_load_32, ATOMIC_LOAD_I32>; def : LoadPatExternalSym<i64, atomic_load_64, ATOMIC_LOAD_I64>; - // Select loads with just a constant offset. def : LoadPatOffsetOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>; def : LoadPatOffsetOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>; @@ -56,14 +55,14 @@ def : LoadPatExternSymOffOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>; // Extending loads. Note that there are only zero-extending atomic loads, no // sign-extending loads. 
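To make the comment above concrete, a hypothetical C++ example using plain std::atomic (not part of the patch): the unsigned load matches the zero-extending i32.atomic.load8_u shape, while the signed load has no dedicated instruction and is expected to become a zero-extending load followed by a separate sign-extension, which is what the patterns below arrange for.

#include <atomic>
#include <cstdint>

// Zero-extending 8-bit atomic load: the i32.atomic.load8_u case.
uint32_t loadZext(std::atomic<uint8_t> &A) {
  return A.load(std::memory_order_seq_cst);
}

// Sign-extending 8-bit atomic load: no direct instruction exists, so this is
// expected to select a zero-extending load plus a separate sign-extend.
int32_t loadSext(std::atomic<int8_t> &A) {
  return A.load(std::memory_order_seq_cst);
}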
let Defs = [ARGUMENTS] in { -def ATOMIC_LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load8_u", 0xfe12>; -def ATOMIC_LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load16_u", 0xfe13>; -def ATOMIC_LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load8_u", 0xfe14>; -def ATOMIC_LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load16_u", 0xfe15>; -def ATOMIC_LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load32_u", 0xfe16>; +defm ATOMIC_LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load8_u", 0xfe12>; +defm ATOMIC_LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load16_u", 0xfe13>; +defm ATOMIC_LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load8_u", 0xfe14>; +defm ATOMIC_LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load16_u", 0xfe15>; +defm ATOMIC_LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load32_u", 0xfe16>; } // Defs = [ARGUMENTS] -// Fragments for exending loads. These are different from regular loads because +// Fragments for extending loads. These are different from regular loads because // the SDNodes are derived from AtomicSDNode rather than LoadSDNode and // therefore don't have the extension type field. So instead of matching that, // we match the patterns that the type legalizer expands them to. @@ -72,10 +71,10 @@ def ATOMIC_LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load32_u", 0xfe16>; // i32 (zext (i8 (atomic_load_8))) gets legalized to // i32 (and (i32 (atomic_load_8)), 255) // These can be selected to a single zero-extending atomic load instruction. -def zext_aload_8 : PatFrag<(ops node:$addr), - (and (i32 (atomic_load_8 node:$addr)), 255)>; -def zext_aload_16 : PatFrag<(ops node:$addr), - (and (i32 (atomic_load_16 node:$addr)), 65535)>; +def zext_aload_8_32 : + PatFrag<(ops node:$addr), (and (i32 (atomic_load_8 node:$addr)), 255)>; +def zext_aload_16_32 : + PatFrag<(ops node:$addr), (and (i32 (atomic_load_16 node:$addr)), 65535)>; // Unlike regular loads, extension to i64 is handled differently than i32. // i64 (zext (i8 (atomic_load_8))) gets legalized to // i64 (and (i64 (anyext (i32 (atomic_load_8)))), 255) @@ -93,15 +92,15 @@ def zext_aload_32_64 : // match bare subword loads (for 32-bit results) and anyext loads (for 64-bit // results) and select a zext load; the next instruction will be sext_inreg // which is selected by itself. -def anyext_aload_8_64 : +def sext_aload_8_64 : PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>; -def anyext_aload_16_64 : +def sext_aload_16_64 : PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>; let Predicates = [HasAtomics] in { // Select zero-extending loads with no constant offset. 
-def : LoadPatNoOffset<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatNoOffset<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>; +def : LoadPatNoOffset<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; +def : LoadPatNoOffset<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; def : LoadPatNoOffset<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatNoOffset<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; def : LoadPatNoOffset<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; @@ -109,16 +108,15 @@ def : LoadPatNoOffset<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; // Select sign-extending loads with no constant offset def : LoadPatNoOffset<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; def : LoadPatNoOffset<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatNoOffset<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatNoOffset<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; -// 32->64 sext load gets selected as i32.atomic.load, i64.extend_s/i64 - +def : LoadPatNoOffset<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; +def : LoadPatNoOffset<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; +// 32->64 sext load gets selected as i32.atomic.load, i64.extend_s/i32 // Zero-extending loads with constant offset -def : LoadPatImmOff<i32, zext_aload_8, regPlusImm, ATOMIC_LOAD8_U_I32>; -def : LoadPatImmOff<i32, zext_aload_16, regPlusImm, ATOMIC_LOAD16_U_I32>; -def : LoadPatImmOff<i32, zext_aload_8, or_is_add, ATOMIC_LOAD8_U_I32>; -def : LoadPatImmOff<i32, zext_aload_16, or_is_add, ATOMIC_LOAD16_U_I32>; +def : LoadPatImmOff<i32, zext_aload_8_32, regPlusImm, ATOMIC_LOAD8_U_I32>; +def : LoadPatImmOff<i32, zext_aload_16_32, regPlusImm, ATOMIC_LOAD16_U_I32>; +def : LoadPatImmOff<i32, zext_aload_8_32, or_is_add, ATOMIC_LOAD8_U_I32>; +def : LoadPatImmOff<i32, zext_aload_16_32, or_is_add, ATOMIC_LOAD16_U_I32>; def : LoadPatImmOff<i64, zext_aload_8_64, regPlusImm, ATOMIC_LOAD8_U_I64>; def : LoadPatImmOff<i64, zext_aload_16_64, regPlusImm, ATOMIC_LOAD16_U_I64>; def : LoadPatImmOff<i64, zext_aload_32_64, regPlusImm, ATOMIC_LOAD32_U_I64>; @@ -131,64 +129,62 @@ def : LoadPatImmOff<i32, atomic_load_8, regPlusImm, ATOMIC_LOAD8_U_I32>; def : LoadPatImmOff<i32, atomic_load_16, regPlusImm, ATOMIC_LOAD16_U_I32>; def : LoadPatImmOff<i32, atomic_load_8, or_is_add, ATOMIC_LOAD8_U_I32>; def : LoadPatImmOff<i32, atomic_load_16, or_is_add, ATOMIC_LOAD16_U_I32>; -def : LoadPatImmOff<i64, anyext_aload_8_64, regPlusImm, ATOMIC_LOAD8_U_I64>; -def : LoadPatImmOff<i64, anyext_aload_16_64, regPlusImm, ATOMIC_LOAD16_U_I64>; -def : LoadPatImmOff<i64, anyext_aload_8_64, or_is_add, ATOMIC_LOAD8_U_I64>; -def : LoadPatImmOff<i64, anyext_aload_16_64, or_is_add, ATOMIC_LOAD16_U_I64>; +def : LoadPatImmOff<i64, sext_aload_8_64, regPlusImm, ATOMIC_LOAD8_U_I64>; +def : LoadPatImmOff<i64, sext_aload_16_64, regPlusImm, ATOMIC_LOAD16_U_I64>; +def : LoadPatImmOff<i64, sext_aload_8_64, or_is_add, ATOMIC_LOAD8_U_I64>; +def : LoadPatImmOff<i64, sext_aload_16_64, or_is_add, ATOMIC_LOAD16_U_I64>; // No 32->64 patterns, just use i32.atomic.load and i64.extend_s/i64 -def : LoadPatGlobalAddr<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatGlobalAddr<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>; +def : LoadPatGlobalAddr<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; +def : LoadPatGlobalAddr<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; def : LoadPatGlobalAddr<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatGlobalAddr<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; def : LoadPatGlobalAddr<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; def : LoadPatGlobalAddr<i32, atomic_load_8, 
ATOMIC_LOAD8_U_I32>; def : LoadPatGlobalAddr<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatGlobalAddr<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatGlobalAddr<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; +def : LoadPatGlobalAddr<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; +def : LoadPatGlobalAddr<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatExternalSym<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatExternalSym<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>; +def : LoadPatExternalSym<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; +def : LoadPatExternalSym<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; def : LoadPatExternalSym<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatExternalSym<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; def : LoadPatExternalSym<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; def : LoadPatExternalSym<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; def : LoadPatExternalSym<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatExternalSym<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatExternalSym<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; - +def : LoadPatExternalSym<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; +def : LoadPatExternalSym<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; // Extending loads with just a constant offset -def : LoadPatOffsetOnly<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatOffsetOnly<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>; +def : LoadPatOffsetOnly<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; +def : LoadPatOffsetOnly<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; def : LoadPatOffsetOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatOffsetOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; def : LoadPatOffsetOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; def : LoadPatOffsetOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; def : LoadPatOffsetOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatOffsetOnly<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatOffsetOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; +def : LoadPatOffsetOnly<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; +def : LoadPatOffsetOnly<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatGlobalAddrOffOnly<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatGlobalAddrOffOnly<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>; +def : LoadPatGlobalAddrOffOnly<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; +def : LoadPatGlobalAddrOffOnly<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; def : LoadPatGlobalAddrOffOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : LoadPatGlobalAddrOffOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; def : LoadPatGlobalAddrOffOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; def : LoadPatGlobalAddrOffOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; def : LoadPatGlobalAddrOffOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatGlobalAddrOffOnly<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatGlobalAddrOffOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; +def : LoadPatGlobalAddrOffOnly<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; +def : LoadPatGlobalAddrOffOnly<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; -def : LoadPatExternSymOffOnly<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>; -def : LoadPatExternSymOffOnly<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>; +def : LoadPatExternSymOffOnly<i32, zext_aload_8_32, ATOMIC_LOAD8_U_I32>; +def : LoadPatExternSymOffOnly<i32, zext_aload_16_32, ATOMIC_LOAD16_U_I32>; def : LoadPatExternSymOffOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>; def : 
LoadPatExternSymOffOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>; def : LoadPatExternSymOffOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>; def : LoadPatExternSymOffOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>; def : LoadPatExternSymOffOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>; -def : LoadPatExternSymOffOnly<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>; -def : LoadPatExternSymOffOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; - +def : LoadPatExternSymOffOnly<i64, sext_aload_8_64, ATOMIC_LOAD8_U_I64>; +def : LoadPatExternSymOffOnly<i64, sext_aload_16_64, ATOMIC_LOAD16_U_I64>; } // Predicates = [HasAtomics] @@ -196,19 +192,466 @@ def : LoadPatExternSymOffOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>; // Atomic stores //===----------------------------------------------------------------------===// -// TODO: add atomic stores here... +let Defs = [ARGUMENTS] in { +defm ATOMIC_STORE_I32 : WebAssemblyStore<I32, "i32.atomic.store", 0xfe17>; +defm ATOMIC_STORE_I64 : WebAssemblyStore<I64, "i64.atomic.store", 0xfe18>; +} // Defs = [ARGUMENTS] + +// We need an 'atomic' version of store patterns because store and atomic_store +// nodes have different operand orders: +// store: (store $val, $ptr) +// atomic_store: (store $ptr, $val) + +let Predicates = [HasAtomics] in { + +// Select stores with no constant offset. +class AStorePatNoOffset<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind I32:$addr, ty:$val), (inst 0, 0, I32:$addr, ty:$val)>; +def : AStorePatNoOffset<i32, atomic_store_32, ATOMIC_STORE_I32>; +def : AStorePatNoOffset<i64, atomic_store_64, ATOMIC_STORE_I64>; + +// Select stores with a constant offset. + +// Pattern with address + immediate offset +class AStorePatImmOff<ValueType ty, PatFrag kind, PatFrag operand, NI inst> : + Pat<(kind (operand I32:$addr, imm:$off), ty:$val), + (inst 0, imm:$off, I32:$addr, ty:$val)>; +def : AStorePatImmOff<i32, atomic_store_32, regPlusImm, ATOMIC_STORE_I32>; +def : AStorePatImmOff<i64, atomic_store_64, regPlusImm, ATOMIC_STORE_I64>; +def : AStorePatImmOff<i32, atomic_store_32, or_is_add, ATOMIC_STORE_I32>; +def : AStorePatImmOff<i64, atomic_store_64, or_is_add, ATOMIC_STORE_I64>; + +class AStorePatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)), + ty:$val), + (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>; +def : AStorePatGlobalAddr<i32, atomic_store_32, ATOMIC_STORE_I32>; +def : AStorePatGlobalAddr<i64, atomic_store_64, ATOMIC_STORE_I64>; + +class AStorePatExternalSym<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), ty:$val), + (inst 0, texternalsym:$off, I32:$addr, ty:$val)>; +def : AStorePatExternalSym<i32, atomic_store_32, ATOMIC_STORE_I32>; +def : AStorePatExternalSym<i64, atomic_store_64, ATOMIC_STORE_I64>; + +// Select stores with just a constant offset. 
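As a hypothetical C++ reference point for the AStorePat* patterns above (standard std::atomic calls, not part of the patch; the mapping to i32.atomic.store and i64.atomic.store is the expected selection, not something this snippet verifies):

#include <atomic>
#include <cstdint>

// Word-sized atomic stores, the cases covered by ATOMIC_STORE_I32/I64.
void store32(std::atomic<uint32_t> &A, uint32_t V) {
  A.store(V, std::memory_order_seq_cst);
}

void store64(std::atomic<uint64_t> &A, uint64_t V) {
  A.store(V, std::memory_order_seq_cst);
}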
+class AStorePatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind imm:$off, ty:$val), (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; +def : AStorePatOffsetOnly<i32, atomic_store_32, ATOMIC_STORE_I32>; +def : AStorePatOffsetOnly<i64, atomic_store_64, ATOMIC_STORE_I64>; + +class AStorePatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind (WebAssemblywrapper tglobaladdr:$off), ty:$val), + (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; +def : AStorePatGlobalAddrOffOnly<i32, atomic_store_32, ATOMIC_STORE_I32>; +def : AStorePatGlobalAddrOffOnly<i64, atomic_store_64, ATOMIC_STORE_I64>; + +class AStorePatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind (WebAssemblywrapper texternalsym:$off), ty:$val), + (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>; +def : AStorePatExternSymOffOnly<i32, atomic_store_32, ATOMIC_STORE_I32>; +def : AStorePatExternSymOffOnly<i64, atomic_store_64, ATOMIC_STORE_I64>; + +} // Predicates = [HasAtomics] + +// Truncating stores. +let Defs = [ARGUMENTS] in { +defm ATOMIC_STORE8_I32 : WebAssemblyStore<I32, "i32.atomic.store8", 0xfe19>; +defm ATOMIC_STORE16_I32 : WebAssemblyStore<I32, "i32.atomic.store16", 0xfe1a>; +defm ATOMIC_STORE8_I64 : WebAssemblyStore<I64, "i64.atomic.store8", 0xfe1b>; +defm ATOMIC_STORE16_I64 : WebAssemblyStore<I64, "i64.atomic.store16", 0xfe1c>; +defm ATOMIC_STORE32_I64 : WebAssemblyStore<I64, "i64.atomic.store32", 0xfe1d>; +} // Defs = [ARGUMENTS] + +// Fragments for truncating stores. + +// We don't have single truncating atomic store instructions. For 32-bit +// instructions, we just need to match bare atomic stores. On the other hand, +// truncating stores from i64 values are once truncated to i32 first. +class trunc_astore_64<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (kind node:$addr, (i32 (trunc (i64 node:$val))))>; +def trunc_astore_8_64 : trunc_astore_64<atomic_store_8>; +def trunc_astore_16_64 : trunc_astore_64<atomic_store_16>; +def trunc_astore_32_64 : trunc_astore_64<atomic_store_32>; + +let Predicates = [HasAtomics] in { + +// Truncating stores with no constant offset +def : AStorePatNoOffset<i32, atomic_store_8, ATOMIC_STORE8_I32>; +def : AStorePatNoOffset<i32, atomic_store_16, ATOMIC_STORE16_I32>; +def : AStorePatNoOffset<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; +def : AStorePatNoOffset<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; +def : AStorePatNoOffset<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; + +// Truncating stores with a constant offset +def : AStorePatImmOff<i32, atomic_store_8, regPlusImm, ATOMIC_STORE8_I32>; +def : AStorePatImmOff<i32, atomic_store_16, regPlusImm, ATOMIC_STORE16_I32>; +def : AStorePatImmOff<i64, trunc_astore_8_64, regPlusImm, ATOMIC_STORE8_I64>; +def : AStorePatImmOff<i64, trunc_astore_16_64, regPlusImm, ATOMIC_STORE16_I64>; +def : AStorePatImmOff<i64, trunc_astore_32_64, regPlusImm, ATOMIC_STORE32_I64>; +def : AStorePatImmOff<i32, atomic_store_8, or_is_add, ATOMIC_STORE8_I32>; +def : AStorePatImmOff<i32, atomic_store_16, or_is_add, ATOMIC_STORE16_I32>; +def : AStorePatImmOff<i64, trunc_astore_8_64, or_is_add, ATOMIC_STORE8_I64>; +def : AStorePatImmOff<i64, trunc_astore_16_64, or_is_add, ATOMIC_STORE16_I64>; +def : AStorePatImmOff<i64, trunc_astore_32_64, or_is_add, ATOMIC_STORE32_I64>; + +def : AStorePatGlobalAddr<i32, atomic_store_8, ATOMIC_STORE8_I32>; +def : AStorePatGlobalAddr<i32, atomic_store_16, ATOMIC_STORE16_I32>; +def : AStorePatGlobalAddr<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; +def : AStorePatGlobalAddr<i64, 
trunc_astore_16_64, ATOMIC_STORE16_I64>; +def : AStorePatGlobalAddr<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; + +def : AStorePatExternalSym<i32, atomic_store_8, ATOMIC_STORE8_I32>; +def : AStorePatExternalSym<i32, atomic_store_16, ATOMIC_STORE16_I32>; +def : AStorePatExternalSym<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; +def : AStorePatExternalSym<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; +def : AStorePatExternalSym<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; + +// Truncating stores with just a constant offset +def : AStorePatOffsetOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>; +def : AStorePatOffsetOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>; +def : AStorePatOffsetOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; +def : AStorePatOffsetOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; +def : AStorePatOffsetOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; + +def : AStorePatGlobalAddrOffOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>; +def : AStorePatGlobalAddrOffOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>; +def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; +def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; +def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; + +def : AStorePatExternSymOffOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>; +def : AStorePatExternSymOffOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>; +def : AStorePatExternSymOffOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>; +def : AStorePatExternSymOffOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>; +def : AStorePatExternSymOffOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>; + +} // Predicates = [HasAtomics] //===----------------------------------------------------------------------===// -// Low-level exclusive operations +// Atomic binary read-modify-writes //===----------------------------------------------------------------------===// -// TODO: add exclusive operations here... 
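Before the read-modify-write definitions that follow, a hypothetical C++ illustration of the truncating stores matched above (plain std::atomic code, not part of the patch): for an i32 source the bare atomic store patterns suffice, while an i64 source is truncated first, which is the shape the trunc_astore_* fragments are written to match.

#include <atomic>
#include <cstdint>

// 8-bit store from a 32-bit value: matched by the bare atomic_store_8 pattern.
void storeByteFromU32(std::atomic<uint8_t> &A, uint32_t V) {
  A.store(static_cast<uint8_t>(V), std::memory_order_seq_cst);
}

// 8-bit store from a 64-bit value: the value is truncated before the store,
// the case trunc_astore_8_64 is intended to cover.
void storeByteFromU64(std::atomic<uint8_t> &A, uint64_t V) {
  A.store(static_cast<uint8_t>(V), std::memory_order_seq_cst);
}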
+let Defs = [ARGUMENTS] in { + +multiclass WebAssemblyBinRMW<WebAssemblyRegClass rc, string Name, int Opcode> { + defm "" : I<(outs rc:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), + (outs), (ins P2Align:$p2align, offset32_op:$off), [], + !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}, $val"), + !strconcat(Name, "\t${off}, ${p2align}"), Opcode>; +} + +defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0xfe1e>; +defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0xfe1f>; +defm ATOMIC_RMW8_U_ADD_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.add", 0xfe20>; +defm ATOMIC_RMW16_U_ADD_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.add", 0xfe21>; +defm ATOMIC_RMW8_U_ADD_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.add", 0xfe22>; +defm ATOMIC_RMW16_U_ADD_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.add", 0xfe23>; +defm ATOMIC_RMW32_U_ADD_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.add", 0xfe24>; + +defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0xfe25>; +defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0xfe26>; +defm ATOMIC_RMW8_U_SUB_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.sub", 0xfe27>; +defm ATOMIC_RMW16_U_SUB_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.sub", 0xfe28>; +defm ATOMIC_RMW8_U_SUB_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.sub", 0xfe29>; +defm ATOMIC_RMW16_U_SUB_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.sub", 0xfe2a>; +defm ATOMIC_RMW32_U_SUB_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.sub", 0xfe2b>; + +defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0xfe2c>; +defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0xfe2d>; +defm ATOMIC_RMW8_U_AND_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.and", 0xfe2e>; +defm ATOMIC_RMW16_U_AND_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.and", 0xfe2f>; +defm ATOMIC_RMW8_U_AND_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.and", 0xfe30>; +defm ATOMIC_RMW16_U_AND_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.and", 0xfe31>; +defm ATOMIC_RMW32_U_AND_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.and", 0xfe32>; + +defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0xfe33>; +defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0xfe34>; +defm ATOMIC_RMW8_U_OR_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.or", 0xfe35>; +defm ATOMIC_RMW16_U_OR_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.or", 0xfe36>; +defm ATOMIC_RMW8_U_OR_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.or", 0xfe37>; +defm ATOMIC_RMW16_U_OR_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.or", 0xfe38>; +defm ATOMIC_RMW32_U_OR_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.or", 0xfe39>; + +defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0xfe3a>; +defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0xfe3b>; +defm ATOMIC_RMW8_U_XOR_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.xor", 0xfe3c>; +defm ATOMIC_RMW16_U_XOR_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.xor", 0xfe3d>; +defm ATOMIC_RMW8_U_XOR_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.xor", 0xfe3e>; +defm ATOMIC_RMW16_U_XOR_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.xor", 0xfe3f>; +defm ATOMIC_RMW32_U_XOR_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.xor", 0xfe40>; + +defm ATOMIC_RMW_XCHG_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0xfe41>; +defm ATOMIC_RMW_XCHG_I64 
: + WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0xfe42>; +defm ATOMIC_RMW8_U_XCHG_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw8_u.xchg", 0xfe43>; +defm ATOMIC_RMW16_U_XCHG_I32 : + WebAssemblyBinRMW<I32, "i32.atomic.rmw16_u.xchg", 0xfe44>; +defm ATOMIC_RMW8_U_XCHG_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw8_u.xchg", 0xfe45>; +defm ATOMIC_RMW16_U_XCHG_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw16_u.xchg", 0xfe46>; +defm ATOMIC_RMW32_U_XCHG_I64 : + WebAssemblyBinRMW<I64, "i64.atomic.rmw32_u.xchg", 0xfe47>; +} + +// Select binary RMWs with no constant offset. +class BinRMWPatNoOffset<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind I32:$addr, ty:$val)), (inst 0, 0, I32:$addr, ty:$val)>; + +// Select binary RMWs with a constant offset. + +// Pattern with address + immediate offset +class BinRMWPatImmOff<ValueType ty, PatFrag kind, PatFrag operand, NI inst> : + Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$val)), + (inst 0, imm:$off, I32:$addr, ty:$val)>; + +class BinRMWPatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)), + ty:$val)), + (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>; + +class BinRMWPatExternalSym<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)), + ty:$val)), + (inst 0, texternalsym:$off, I32:$addr, ty:$val)>; + +// Select binary RMWs with just a constant offset. +class BinRMWPatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind imm:$off, ty:$val)), + (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; + +class BinRMWPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)), + (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; -// Load-exclusives. +class BinRMWPatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (WebAssemblywrapper texternalsym:$off), ty:$val)), + (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>; -// Store-exclusives. +// Patterns for various addressing modes. +multiclass BinRMWPattern<PatFrag rmw_32, PatFrag rmw_64, NI inst_32, + NI inst_64> { + def : BinRMWPatNoOffset<i32, rmw_32, inst_32>; + def : BinRMWPatNoOffset<i64, rmw_64, inst_64>; -// Store-release-exclusives. + def : BinRMWPatImmOff<i32, rmw_32, regPlusImm, inst_32>; + def : BinRMWPatImmOff<i64, rmw_64, regPlusImm, inst_64>; + def : BinRMWPatImmOff<i32, rmw_32, or_is_add, inst_32>; + def : BinRMWPatImmOff<i64, rmw_64, or_is_add, inst_64>; -// And clear exclusive. 
+ def : BinRMWPatGlobalAddr<i32, rmw_32, inst_32>; + def : BinRMWPatGlobalAddr<i64, rmw_64, inst_64>; + def : BinRMWPatExternalSym<i32, rmw_32, inst_32>; + def : BinRMWPatExternalSym<i64, rmw_64, inst_64>; + + def : BinRMWPatOffsetOnly<i32, rmw_32, inst_32>; + def : BinRMWPatOffsetOnly<i64, rmw_64, inst_64>; + + def : BinRMWPatGlobalAddrOffOnly<i32, rmw_32, inst_32>; + def : BinRMWPatGlobalAddrOffOnly<i64, rmw_64, inst_64>; + + def : BinRMWPatExternSymOffOnly<i32, rmw_32, inst_32>; + def : BinRMWPatExternSymOffOnly<i64, rmw_64, inst_64>; +} + +let Predicates = [HasAtomics] in { +defm : BinRMWPattern<atomic_load_add_32, atomic_load_add_64, ATOMIC_RMW_ADD_I32, + ATOMIC_RMW_ADD_I64>; +defm : BinRMWPattern<atomic_load_sub_32, atomic_load_sub_64, ATOMIC_RMW_SUB_I32, + ATOMIC_RMW_SUB_I64>; +defm : BinRMWPattern<atomic_load_and_32, atomic_load_and_64, ATOMIC_RMW_AND_I32, + ATOMIC_RMW_AND_I64>; +defm : BinRMWPattern<atomic_load_or_32, atomic_load_or_64, ATOMIC_RMW_OR_I32, + ATOMIC_RMW_OR_I64>; +defm : BinRMWPattern<atomic_load_xor_32, atomic_load_xor_64, ATOMIC_RMW_XOR_I32, + ATOMIC_RMW_XOR_I64>; +defm : BinRMWPattern<atomic_swap_32, atomic_swap_64, ATOMIC_RMW_XCHG_I32, + ATOMIC_RMW_XCHG_I64>; +} // Predicates = [HasAtomics] + +// Truncating & zero-extending binary RMW patterns. +// These are combined patterns of truncating store patterns and zero-extending +// load patterns above. +class zext_bin_rmw_8_32<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (and (i32 (kind node:$addr, node:$val)), 255)>; +class zext_bin_rmw_16_32<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (and (i32 (kind node:$addr, node:$val)), 65535)>; +class zext_bin_rmw_8_64<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (and (i64 (anyext (i32 (kind node:$addr, + (i32 (trunc (i64 node:$val))))))), 255)>; +class zext_bin_rmw_16_64<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (and (i64 (anyext (i32 (kind node:$addr, + (i32 (trunc (i64 node:$val))))))), 65535)>; +class zext_bin_rmw_32_64<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (zext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>; + +// Truncating & sign-extending binary RMW patterns. +// These are combined patterns of truncating store patterns and sign-extending +// load patterns above. We match subword RMWs (for 32-bit) and anyext RMWs (for +// 64-bit) and select a zext RMW; the next instruction will be sext_inreg which +// is selected by itself. +class sext_bin_rmw_8_32<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), (kind node:$addr, node:$val)>; +class sext_bin_rmw_16_32<PatFrag kind> : sext_bin_rmw_8_32<kind>; +class sext_bin_rmw_8_64<PatFrag kind> : + PatFrag<(ops node:$addr, node:$val), + (anyext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>; +class sext_bin_rmw_16_64<PatFrag kind> : sext_bin_rmw_8_64<kind>; +// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_s/i32 + +// Patterns for various addressing modes for truncating-extending binary RMWs. 
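A hypothetical C++ counterpart for the binary RMW patterns above (standard std::atomic operations, not part of the patch; the instruction names in the comments are the ones the BinRMWPattern and zext_bin_rmw definitions are expected to select):

#include <atomic>
#include <cstdint>

uint32_t addRMW32(std::atomic<uint32_t> &A, uint32_t V) {
  return A.fetch_add(V);   // expected: i32.atomic.rmw.add
}

uint64_t xchgRMW64(std::atomic<uint64_t> &A, uint64_t V) {
  return A.exchange(V);    // expected: i64.atomic.rmw.xchg
}

// Sub-word RMW whose result is used zero-extended, the zext_bin_rmw_8_32
// shape after legalization.
uint32_t addRMW8Zext(std::atomic<uint8_t> &A, uint8_t V) {
  return A.fetch_add(V);   // expected: i32.atomic.rmw8_u.add
}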
+multiclass BinRMWTruncExtPattern< + PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32, PatFrag rmw_64, + NI inst8_32, NI inst16_32, NI inst8_64, NI inst16_64, NI inst32_64> { + // Truncating-extending binary RMWs with no constant offset + def : BinRMWPatNoOffset<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatNoOffset<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatNoOffset<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatNoOffset<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; + def : BinRMWPatNoOffset<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; + + def : BinRMWPatNoOffset<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatNoOffset<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatNoOffset<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatNoOffset<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; + + // Truncating-extending binary RMWs with a constant offset + def : BinRMWPatImmOff<i32, zext_bin_rmw_8_32<rmw_8>, regPlusImm, inst8_32>; + def : BinRMWPatImmOff<i32, zext_bin_rmw_16_32<rmw_16>, regPlusImm, inst16_32>; + def : BinRMWPatImmOff<i64, zext_bin_rmw_8_64<rmw_8>, regPlusImm, inst8_64>; + def : BinRMWPatImmOff<i64, zext_bin_rmw_16_64<rmw_16>, regPlusImm, inst16_64>; + def : BinRMWPatImmOff<i64, zext_bin_rmw_32_64<rmw_32>, regPlusImm, inst32_64>; + def : BinRMWPatImmOff<i32, zext_bin_rmw_8_32<rmw_8>, or_is_add, inst8_32>; + def : BinRMWPatImmOff<i32, zext_bin_rmw_16_32<rmw_16>, or_is_add, inst16_32>; + def : BinRMWPatImmOff<i64, zext_bin_rmw_8_64<rmw_8>, or_is_add, inst8_64>; + def : BinRMWPatImmOff<i64, zext_bin_rmw_16_64<rmw_16>, or_is_add, inst16_64>; + def : BinRMWPatImmOff<i64, zext_bin_rmw_32_64<rmw_32>, or_is_add, inst32_64>; + + def : BinRMWPatImmOff<i32, sext_bin_rmw_8_32<rmw_8>, regPlusImm, inst8_32>; + def : BinRMWPatImmOff<i32, sext_bin_rmw_16_32<rmw_16>, regPlusImm, inst16_32>; + def : BinRMWPatImmOff<i64, sext_bin_rmw_8_64<rmw_8>, regPlusImm, inst8_64>; + def : BinRMWPatImmOff<i64, sext_bin_rmw_16_64<rmw_16>, regPlusImm, inst16_64>; + def : BinRMWPatImmOff<i32, sext_bin_rmw_8_32<rmw_8>, or_is_add, inst8_32>; + def : BinRMWPatImmOff<i32, sext_bin_rmw_16_32<rmw_16>, or_is_add, inst16_32>; + def : BinRMWPatImmOff<i64, sext_bin_rmw_8_64<rmw_8>, or_is_add, inst8_64>; + def : BinRMWPatImmOff<i64, sext_bin_rmw_16_64<rmw_16>, or_is_add, inst16_64>; + + def : BinRMWPatGlobalAddr<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatGlobalAddr<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatGlobalAddr<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatGlobalAddr<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; + def : BinRMWPatGlobalAddr<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; + + def : BinRMWPatGlobalAddr<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatGlobalAddr<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatGlobalAddr<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatGlobalAddr<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; + + def : BinRMWPatExternalSym<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatExternalSym<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatExternalSym<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatExternalSym<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; + def : BinRMWPatExternalSym<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; + + def : BinRMWPatExternalSym<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatExternalSym<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatExternalSym<i64, 
sext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatExternalSym<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; + + // Truncating-extending binary RMWs with just a constant offset + def : BinRMWPatOffsetOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatOffsetOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatOffsetOnly<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatOffsetOnly<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; + def : BinRMWPatOffsetOnly<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; + + def : BinRMWPatOffsetOnly<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatOffsetOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatOffsetOnly<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatOffsetOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; + + def : BinRMWPatGlobalAddrOffOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatGlobalAddrOffOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatGlobalAddrOffOnly<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatGlobalAddrOffOnly<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; + def : BinRMWPatGlobalAddrOffOnly<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; + + def : BinRMWPatGlobalAddrOffOnly<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatGlobalAddrOffOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatGlobalAddrOffOnly<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatGlobalAddrOffOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; + + def : BinRMWPatExternSymOffOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatExternSymOffOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatExternSymOffOnly<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatExternSymOffOnly<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>; + def : BinRMWPatExternSymOffOnly<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>; + + def : BinRMWPatExternSymOffOnly<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>; + def : BinRMWPatExternSymOffOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>; + def : BinRMWPatExternSymOffOnly<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>; + def : BinRMWPatExternSymOffOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>; +} + +let Predicates = [HasAtomics] in { +defm : BinRMWTruncExtPattern< + atomic_load_add_8, atomic_load_add_16, atomic_load_add_32, atomic_load_add_64, + ATOMIC_RMW8_U_ADD_I32, ATOMIC_RMW16_U_ADD_I32, + ATOMIC_RMW8_U_ADD_I64, ATOMIC_RMW16_U_ADD_I64, ATOMIC_RMW32_U_ADD_I64>; +defm : BinRMWTruncExtPattern< + atomic_load_sub_8, atomic_load_sub_16, atomic_load_sub_32, atomic_load_sub_64, + ATOMIC_RMW8_U_SUB_I32, ATOMIC_RMW16_U_SUB_I32, + ATOMIC_RMW8_U_SUB_I64, ATOMIC_RMW16_U_SUB_I64, ATOMIC_RMW32_U_SUB_I64>; +defm : BinRMWTruncExtPattern< + atomic_load_and_8, atomic_load_and_16, atomic_load_and_32, atomic_load_and_64, + ATOMIC_RMW8_U_AND_I32, ATOMIC_RMW16_U_AND_I32, + ATOMIC_RMW8_U_AND_I64, ATOMIC_RMW16_U_AND_I64, ATOMIC_RMW32_U_AND_I64>; +defm : BinRMWTruncExtPattern< + atomic_load_or_8, atomic_load_or_16, atomic_load_or_32, atomic_load_or_64, + ATOMIC_RMW8_U_OR_I32, ATOMIC_RMW16_U_OR_I32, + ATOMIC_RMW8_U_OR_I64, ATOMIC_RMW16_U_OR_I64, ATOMIC_RMW32_U_OR_I64>; +defm : BinRMWTruncExtPattern< + atomic_load_xor_8, atomic_load_xor_16, atomic_load_xor_32, atomic_load_xor_64, + ATOMIC_RMW8_U_XOR_I32, ATOMIC_RMW16_U_XOR_I32, + ATOMIC_RMW8_U_XOR_I64, ATOMIC_RMW16_U_XOR_I64, ATOMIC_RMW32_U_XOR_I64>; +defm : BinRMWTruncExtPattern< + atomic_swap_8, atomic_swap_16, atomic_swap_32, atomic_swap_64, + 
ATOMIC_RMW8_U_XCHG_I32, ATOMIC_RMW16_U_XCHG_I32, + ATOMIC_RMW8_U_XCHG_I64, ATOMIC_RMW16_U_XCHG_I64, ATOMIC_RMW32_U_XCHG_I64>; +} // Predicates = [HasAtomics] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td index 6b45839c14b0..34262752430c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td @@ -8,89 +8,111 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Call operand code-gen constructs. +/// WebAssembly Call operand code-gen constructs. /// //===----------------------------------------------------------------------===// // TODO: addr64: These currently assume the callee address is 32-bit. +// FIXME: add $type to first call_indirect asmstr (and maybe $flags) let Defs = [ARGUMENTS] in { // Call sequence markers. These have an immediate which represents the amount of // stack space to allocate or free, which is used for varargs lowering. let Uses = [SP32, SP64], Defs = [SP32, SP64], isCodeGenOnly = 1 in { -def ADJCALLSTACKDOWN : I<(outs), (ins i32imm:$amt, i32imm:$amt2), - [(WebAssemblycallseq_start timm:$amt, timm:$amt2)]>; -def ADJCALLSTACKUP : I<(outs), (ins i32imm:$amt, i32imm:$amt2), - [(WebAssemblycallseq_end timm:$amt, timm:$amt2)]>; +defm ADJCALLSTACKDOWN : NRI<(outs), (ins i32imm:$amt, i32imm:$amt2), + [(WebAssemblycallseq_start timm:$amt, timm:$amt2)]>; +defm ADJCALLSTACKUP : NRI<(outs), (ins i32imm:$amt, i32imm:$amt2), + [(WebAssemblycallseq_end timm:$amt, timm:$amt2)]>; } // isCodeGenOnly = 1 multiclass CALL<WebAssemblyRegClass vt, string prefix> { - def CALL_#vt : I<(outs vt:$dst), (ins function32_op:$callee, variable_ops), - [(set vt:$dst, (WebAssemblycall1 (i32 imm:$callee)))], - !strconcat(prefix, "call\t$dst, $callee"), - 0x10>; + defm CALL_#vt : I<(outs vt:$dst), (ins function32_op:$callee, variable_ops), + (outs), (ins function32_op:$callee), + [(set vt:$dst, (WebAssemblycall1 (i32 imm:$callee)))], + !strconcat(prefix, "call\t$dst, $callee"), + !strconcat(prefix, "call\t$callee"), + 0x10>; let isCodeGenOnly = 1 in { - def PCALL_INDIRECT_#vt : I<(outs vt:$dst), (ins I32:$callee, variable_ops), - [(set vt:$dst, (WebAssemblycall1 I32:$callee))], - "PSEUDO CALL INDIRECT\t$callee">; + defm PCALL_INDIRECT_#vt : I<(outs vt:$dst), (ins I32:$callee, variable_ops), + (outs), (ins I32:$callee), + [(set vt:$dst, (WebAssemblycall1 I32:$callee))], + "PSEUDO CALL INDIRECT\t$callee", + "PSEUDO CALL INDIRECT\t$callee">; } // isCodeGenOnly = 1 - def CALL_INDIRECT_#vt : I<(outs vt:$dst), - (ins TypeIndex:$type, i32imm:$flags, variable_ops), - [], - !strconcat(prefix, "call_indirect\t$dst"), - 0x11>; + defm CALL_INDIRECT_#vt : I<(outs vt:$dst), + (ins TypeIndex:$type, i32imm:$flags, variable_ops), + (outs), (ins TypeIndex:$type, i32imm:$flags), + [], + !strconcat(prefix, "call_indirect\t$dst"), + !strconcat(prefix, "call_indirect\t$type"), + 0x11>; } multiclass SIMD_CALL<ValueType vt, string prefix> { - def CALL_#vt : SIMD_I<(outs V128:$dst), (ins function32_op:$callee, variable_ops), + defm CALL_#vt : SIMD_I<(outs V128:$dst), (ins function32_op:$callee, + variable_ops), + (outs), (ins function32_op:$callee), [(set (vt V128:$dst), - (WebAssemblycall1 (i32 imm:$callee)))], + (WebAssemblycall1 (i32 imm:$callee)))], !strconcat(prefix, "call\t$dst, $callee"), + !strconcat(prefix, "call\t$callee"), 0x10>; let isCodeGenOnly = 1 in { - def PCALL_INDIRECT_#vt 
: SIMD_I<(outs V128:$dst), - (ins I32:$callee, variable_ops), - [(set (vt V128:$dst), - (WebAssemblycall1 I32:$callee))], - "PSEUDO CALL INDIRECT\t$callee">; + defm PCALL_INDIRECT_#vt : SIMD_I<(outs V128:$dst), + (ins I32:$callee, variable_ops), + (outs), (ins I32:$callee), + [(set (vt V128:$dst), + (WebAssemblycall1 I32:$callee))], + "PSEUDO CALL INDIRECT\t$callee", + "PSEUDO CALL INDIRECT\t$callee">; } // isCodeGenOnly = 1 - def CALL_INDIRECT_#vt : SIMD_I<(outs V128:$dst), + defm CALL_INDIRECT_#vt : SIMD_I<(outs V128:$dst), (ins TypeIndex:$type, i32imm:$flags, - variable_ops), + variable_ops), + (outs), (ins TypeIndex:$type, i32imm:$flags), [], - !strconcat(prefix, "call_indirect\t$dst"), + !strconcat(prefix, + "call_indirect\t$dst"), + !strconcat(prefix, "call_indirect\t$type"), 0x11>; } let Uses = [SP32, SP64], isCall = 1 in { - defm : CALL<I32, "i32.">; - defm : CALL<I64, "i64.">; - defm : CALL<F32, "f32.">; - defm : CALL<F64, "f64.">; - defm : SIMD_CALL<v16i8, "i8x16.">; - defm : SIMD_CALL<v8i16, "i16x8.">; - defm : SIMD_CALL<v4i32, "i32x4.">; - defm : SIMD_CALL<v4f32, "f32x4.">; - - def CALL_VOID : I<(outs), (ins function32_op:$callee, variable_ops), - [(WebAssemblycall0 (i32 imm:$callee))], - "call \t$callee", 0x10>; + defm "" : CALL<I32, "i32.">; + defm "" : CALL<I64, "i64.">; + defm "" : CALL<F32, "f32.">; + defm "" : CALL<F64, "f64.">; + defm "" : CALL<EXCEPT_REF, "except_ref.">; + defm "" : SIMD_CALL<v16i8, "i8x16.">; + defm "" : SIMD_CALL<v8i16, "i16x8.">; + defm "" : SIMD_CALL<v4i32, "i32x4.">; + defm "" : SIMD_CALL<v4f32, "f32x4.">; + + defm CALL_VOID : I<(outs), (ins function32_op:$callee, variable_ops), + (outs), (ins function32_op:$callee), + [(WebAssemblycall0 (i32 imm:$callee))], + "call \t$callee", "call\t$callee", 0x10>; let isCodeGenOnly = 1 in { - def PCALL_INDIRECT_VOID : I<(outs), (ins I32:$callee, variable_ops), - [(WebAssemblycall0 I32:$callee)], - "PSEUDO CALL INDIRECT\t$callee">; + defm PCALL_INDIRECT_VOID : I<(outs), (ins I32:$callee, variable_ops), + (outs), (ins I32:$callee), + [(WebAssemblycall0 I32:$callee)], + "PSEUDO CALL INDIRECT\t$callee", + "PSEUDO CALL INDIRECT\t$callee">; } // isCodeGenOnly = 1 - def CALL_INDIRECT_VOID : I<(outs), - (ins TypeIndex:$type, i32imm:$flags, variable_ops), - [], - "call_indirect\t", 0x11>; + defm CALL_INDIRECT_VOID : I<(outs), + (ins TypeIndex:$type, i32imm:$flags, + variable_ops), + (outs), (ins TypeIndex:$type, i32imm:$flags), + [], + "call_indirect\t", "call_indirect\t$type", + 0x11>; } // Uses = [SP32,SP64], isCall = 1 } // Defs = [ARGUMENTS] @@ -112,6 +134,9 @@ def : Pat<(v4i32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), (CALL_v4i32 tglobaladdr:$callee)>, Requires<[HasSIMD128]>; def : Pat<(v4f32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), (CALL_v4f32 tglobaladdr:$callee)>, Requires<[HasSIMD128]>; +def : Pat<(ExceptRef + (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))), + (CALL_EXCEPT_REF tglobaladdr:$callee)>; def : Pat<(WebAssemblycall0 (WebAssemblywrapper tglobaladdr:$callee)), (CALL_VOID tglobaladdr:$callee)>; @@ -132,5 +157,8 @@ def : Pat<(v4i32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), (CALL_v4i32 texternalsym:$callee)>, Requires<[HasSIMD128]>; def : Pat<(v4f32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), (CALL_v4f32 texternalsym:$callee)>, Requires<[HasSIMD128]>; +def : Pat<(ExceptRef + (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))), + (CALL_EXCEPT_REF texternalsym:$callee)>; def : Pat<(WebAssemblycall0 
(WebAssemblywrapper texternalsym:$callee)), (CALL_VOID texternalsym:$callee)>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td index 129794171464..d90244b90662 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly control-flow code-gen constructs. +/// WebAssembly control-flow code-gen constructs. /// //===----------------------------------------------------------------------===// @@ -16,15 +16,17 @@ let Defs = [ARGUMENTS] in { let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in { // The condition operand is a boolean value which WebAssembly represents as i32. -def BR_IF : I<(outs), (ins bb_op:$dst, I32:$cond), - [(brcond I32:$cond, bb:$dst)], - "br_if \t$dst, $cond", 0x0d>; +defm BR_IF : I<(outs), (ins bb_op:$dst, I32:$cond), + (outs), (ins bb_op:$dst), + [(brcond I32:$cond, bb:$dst)], + "br_if \t$dst, $cond", "br_if \t$dst", 0x0d>; let isCodeGenOnly = 1 in -def BR_UNLESS : I<(outs), (ins bb_op:$dst, I32:$cond), []>; +defm BR_UNLESS : I<(outs), (ins bb_op:$dst, I32:$cond), + (outs), (ins bb_op:$dst), []>; let isBarrier = 1 in { -def BR : I<(outs), (ins bb_op:$dst), - [(br bb:$dst)], - "br \t$dst", 0x0c>; +defm BR : NRI<(outs), (ins bb_op:$dst), + [(br bb:$dst)], + "br \t$dst", 0x0c>; } // isBarrier = 1 } // isBranch = 1, isTerminator = 1, hasCtrlDep = 1 @@ -42,92 +44,151 @@ let Defs = [ARGUMENTS] in { // currently. // Set TSFlags{0} to 1 to indicate that the variable_ops are immediates. // Set TSFlags{1} to 1 to indicate that the immediates represent labels. +// FIXME: this can't inherit from I<> since there is no way to inherit from a +// multiclass and still have the let statements. let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { -def BR_TABLE_I32 : I<(outs), (ins I32:$index, variable_ops), - [(WebAssemblybr_table I32:$index)], - "br_table \t$index", 0x0e> { +def BR_TABLE_I32 : NI<(outs), (ins I32:$index, variable_ops), + [(WebAssemblybr_table I32:$index)], 0, + "br_table \t$index", 0x0e> { let TSFlags{0} = 1; let TSFlags{1} = 1; } -def BR_TABLE_I64 : I<(outs), (ins I64:$index, variable_ops), - [(WebAssemblybr_table I64:$index)], - "br_table \t$index"> { +def BR_TABLE_I32_S : NI<(outs), (ins I32:$index), + [], 1, + "br_table \t$index", 0x0e> { + let TSFlags{0} = 1; + let TSFlags{1} = 1; +} +def BR_TABLE_I64 : NI<(outs), (ins I64:$index, variable_ops), + [(WebAssemblybr_table I64:$index)], 0, + "br_table \t$index"> { + let TSFlags{0} = 1; + let TSFlags{1} = 1; +} +def BR_TABLE_I64_S : NI<(outs), (ins I64:$index), + [], 1, + "br_table \t$index"> { let TSFlags{0} = 1; let TSFlags{1} = 1; } } // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 -// Placemarkers to indicate the start or end of a block, loop, or try scope. +// This is technically a control-flow instruction, since all it affects is the +// IP. +defm NOP : NRI<(outs), (ins), [], "nop", 0x01>; + +// Placemarkers to indicate the start or end of a block or loop scope. // These use/clobber VALUE_STACK to prevent them from being moved into the // middle of an expression tree. 
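As a concrete illustration of the register/stack pairing introduced for BR_IF above, the defm expands (via the reworked I multiclass in WebAssemblyInstrFormats.td later in this patch) into roughly the following two records; the stack-based form drops the I32:$cond register operand but keeps the label immediate. This is a sketch of the TableGen expansion, not literal patch content:

  def BR_IF   : NI<(outs), (ins bb_op:$dst, I32:$cond),
                   [(brcond I32:$cond, bb:$dst)], /*stack=*/0,
                   "br_if \t$dst, $cond", 0x0d>;
  def BR_IF_S : NI<(outs), (ins bb_op:$dst), [], /*stack=*/1,
                   "br_if \t$dst", 0x0d>;

The surrounding let isBranch/isTerminator/hasCtrlDep flags from the patch apply to both records.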
let Uses = [VALUE_STACK], Defs = [VALUE_STACK] in { -def BLOCK : I<(outs), (ins Signature:$sig), [], "block \t$sig", 0x02>; -def LOOP : I<(outs), (ins Signature:$sig), [], "loop \t$sig", 0x03>; -def TRY : I<(outs), (ins Signature:$sig), [], "try \t$sig", 0x06>; - -// END_BLOCK, END_LOOP, END_TRY, and END_FUNCTION are represented with the same -// opcode in wasm. -def END_BLOCK : I<(outs), (ins), [], "end_block", 0x0b>; -def END_LOOP : I<(outs), (ins), [], "end_loop", 0x0b>; -def END_TRY : I<(outs), (ins), [], "end_try", 0x0b>; +defm BLOCK : NRI<(outs), (ins Signature:$sig), [], "block \t$sig", 0x02>; +defm LOOP : NRI<(outs), (ins Signature:$sig), [], "loop \t$sig", 0x03>; + +// END_BLOCK, END_LOOP, and END_FUNCTION are represented with the same opcode in +// wasm. +defm END_BLOCK : NRI<(outs), (ins), [], "end_block", 0x0b>; +defm END_LOOP : NRI<(outs), (ins), [], "end_loop", 0x0b>; let isTerminator = 1, isBarrier = 1 in -def END_FUNCTION : I<(outs), (ins), [], "end_function", 0x0b>; +defm END_FUNCTION : NRI<(outs), (ins), [], "end_function", 0x0b>; } // Uses = [VALUE_STACK], Defs = [VALUE_STACK] multiclass RETURN<WebAssemblyRegClass vt> { - def RETURN_#vt : I<(outs), (ins vt:$val), [(WebAssemblyreturn vt:$val)], - "return \t$val", 0x0f>; + defm RETURN_#vt : I<(outs), (ins vt:$val), (outs), (ins), + [(WebAssemblyreturn vt:$val)], + "return \t$val", "return", 0x0f>; // Equivalent to RETURN_#vt, for use at the end of a function when wasm // semantics return by falling off the end of the block. let isCodeGenOnly = 1 in - def FALLTHROUGH_RETURN_#vt : I<(outs), (ins vt:$val), []>; + defm FALLTHROUGH_RETURN_#vt : I<(outs), (ins vt:$val), (outs), (ins), []>; } multiclass SIMD_RETURN<ValueType vt> { - def RETURN_#vt : SIMD_I<(outs), (ins V128:$val), - [(WebAssemblyreturn (vt V128:$val))], - "return \t$val", 0x0f>; + defm RETURN_#vt : SIMD_I<(outs), (ins V128:$val), (outs), (ins), + [(WebAssemblyreturn (vt V128:$val))], + "return \t$val", "return", 0x0f>; // Equivalent to RETURN_#vt, for use at the end of a function when wasm // semantics return by falling off the end of the block. let isCodeGenOnly = 1 in - def FALLTHROUGH_RETURN_#vt : SIMD_I<(outs), (ins V128:$val), []>; + defm FALLTHROUGH_RETURN_#vt : SIMD_I<(outs), (ins V128:$val), (outs), (ins), + []>; } let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { let isReturn = 1 in { - defm : RETURN<I32>; - defm : RETURN<I64>; - defm : RETURN<F32>; - defm : RETURN<F64>; - defm : SIMD_RETURN<v16i8>; - defm : SIMD_RETURN<v8i16>; - defm : SIMD_RETURN<v4i32>; - defm : SIMD_RETURN<v4f32>; - - def RETURN_VOID : I<(outs), (ins), [(WebAssemblyreturn)], "return", 0x0f>; + defm "": RETURN<I32>; + defm "": RETURN<I64>; + defm "": RETURN<F32>; + defm "": RETURN<F64>; + defm "": RETURN<EXCEPT_REF>; + defm "": SIMD_RETURN<v16i8>; + defm "": SIMD_RETURN<v8i16>; + defm "": SIMD_RETURN<v4i32>; + defm "": SIMD_RETURN<v4f32>; + + defm RETURN_VOID : NRI<(outs), (ins), [(WebAssemblyreturn)], "return", 0x0f>; // This is to RETURN_VOID what FALLTHROUGH_RETURN_#vt is to RETURN_#vt. 
let isCodeGenOnly = 1 in - def FALLTHROUGH_RETURN_VOID : I<(outs), (ins), []>; + defm FALLTHROUGH_RETURN_VOID : NRI<(outs), (ins), []>; } // isReturn = 1 -def UNREACHABLE : I<(outs), (ins), [(trap)], "unreachable", 0x00>; +defm UNREACHABLE : NRI<(outs), (ins), [(trap)], "unreachable", 0x00>; +} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 + +//===----------------------------------------------------------------------===// +// Exception handling instructions +//===----------------------------------------------------------------------===// -def THROW_I32 : I<(outs), (ins i32imm:$tag, I32:$obj), - [(int_wasm_throw imm:$tag, I32:$obj)], "throw \t$tag, $obj", - 0x08>; -def THROW_I64 : I<(outs), (ins i32imm:$tag, I64:$obj), - [(int_wasm_throw imm:$tag, I64:$obj)], "throw \t$tag, $obj", - 0x08>; -def RETHROW : I<(outs), (ins i32imm:$rel_depth), [], "rethrow \t$rel_depth", - 0x09>; +let Predicates = [HasExceptionHandling] in { +// Throwing an exception: throw / rethrow +let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { +defm THROW_I32 : I<(outs), (ins i32imm:$tag, I32:$val), + (outs), (ins i32imm:$tag), + [(int_wasm_throw imm:$tag, I32:$val)], + "throw \t$tag, $val", "throw \t$tag", + 0x08>; +defm THROW_I64 : I<(outs), (ins i32imm:$tag, I64:$val), + (outs), (ins i32imm:$tag), + [(int_wasm_throw imm:$tag, I64:$val)], + "throw \t$tag, $val", "throw \t$tag", + 0x08>; +defm RETHROW : NRI<(outs), (ins bb_op:$dst), [], "rethrow \t$dst", 0x09>; +let isCodeGenOnly = 1 in +// This is used when the destination for rethrow is the caller function. This +// will be converted to a rethrow in CFGStackify. +defm RETHROW_TO_CALLER : NRI<(outs), (ins), [], "rethrow">; } // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 -} // Defs = [ARGUMENTS] +// Region within which an exception is caught: try / end_try +let Uses = [VALUE_STACK], Defs = [VALUE_STACK] in { +defm TRY : NRI<(outs), (ins Signature:$sig), [], "try \t$sig", 0x06>; +defm END_TRY : NRI<(outs), (ins), [], "end_try", 0x0b>; +} // Uses = [VALUE_STACK], Defs = [VALUE_STACK] -// rethrow takes a relative depth as an argument, for which currently only 0 is -// possible for C++. Once other languages need depths other than 0, depths will -// be computed in CFGStackify. -def : Pat<(int_wasm_rethrow), (RETHROW 0)>; +// Catching an exception: catch / catch_all +let hasCtrlDep = 1 in { +defm CATCH_I32 : I<(outs I32:$dst), (ins i32imm:$tag), + (outs), (ins i32imm:$tag), + [(set I32:$dst, (int_wasm_catch imm:$tag))], + "i32.catch \t$dst, $tag", "i32.catch \t$tag", 0x07>; +defm CATCH_I64 : I<(outs I64:$dst), (ins i32imm:$tag), + (outs), (ins i32imm:$tag), + [(set I64:$dst, (int_wasm_catch imm:$tag))], + "i64.catch \t$dst, $tag", "i64.catch \t$tag", 0x07>; +defm CATCH_ALL : NRI<(outs), (ins), [], "catch_all", 0x05>; +} + +// Pseudo instructions: cleanupret / catchret +// They are not return instructions in wasm, but setting 'isReturn' to true as +// in X86 is necessary for computing EH scope membership. 
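The CATCHRET and CLEANUPRET pseudos that follow carry empty asm strings and a 0 opcode, reflecting that they are code-gen-only markers matched from the generic catchret/cleanupret nodes rather than real wasm instructions. Any further exception-handling instruction would take the same shape as throw/rethrow/catch above: a register/stack pair from the I or NRI multiclass, gated on HasExceptionHandling. A minimal sketch, with an invented name, asm string, and opcode that are not part of the patch:

  let Predicates = [HasExceptionHandling] in
  defm EH_EXAMPLE : NRI<(outs), (ins i32imm:$tag), [],
                        "eh_example \t$tag", 0x0a>;

This would produce EH_EXAMPLE (register-based) and EH_EXAMPLE_S (stack-based), both selectable only when the "exception-handling" subtarget feature is enabled.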
+let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, + isCodeGenOnly = 1, isReturn = 1 in { + defm CLEANUPRET : NRI<(outs), (ins), [(cleanupret)], "", 0>; + defm CATCHRET : NRI<(outs), (ins bb_op:$dst, bb_op:$from), + [(catchret bb:$dst, bb:$from)], "", 0>; +} +} + +} // Defs = [ARGUMENTS] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td index 426c2c802172..c89c1b549816 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td @@ -8,41 +8,48 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly datatype conversions, truncations, reinterpretations, +/// WebAssembly datatype conversions, truncations, reinterpretations, /// promotions, and demotions operand code-gen constructs. /// //===----------------------------------------------------------------------===// let Defs = [ARGUMENTS] in { -def I32_WRAP_I64 : I<(outs I32:$dst), (ins I64:$src), +defm I32_WRAP_I64 : I<(outs I32:$dst), (ins I64:$src), (outs), (ins), [(set I32:$dst, (trunc I64:$src))], - "i32.wrap/i64\t$dst, $src", 0xa7>; + "i32.wrap/i64\t$dst, $src", "i32.wrap/i64", 0xa7>; -def I64_EXTEND_S_I32 : I<(outs I64:$dst), (ins I32:$src), +defm I64_EXTEND_S_I32 : I<(outs I64:$dst), (ins I32:$src), (outs), (ins), [(set I64:$dst, (sext I32:$src))], - "i64.extend_s/i32\t$dst, $src", 0xac>; -def I64_EXTEND_U_I32 : I<(outs I64:$dst), (ins I32:$src), - [(set I64:$dst, (zext I32:$src))], - "i64.extend_u/i32\t$dst, $src", 0xad>; + "i64.extend_s/i32\t$dst, $src", "i64.extend_s/i32", + 0xac>; +defm I64_EXTEND_U_I32 : I<(outs I64:$dst), (ins I32:$src), (outs), (ins), + [(set I64:$dst, (zext I32:$src))], + "i64.extend_u/i32\t$dst, $src", "i64.extend_u/i32", + 0xad>; -let Predicates = [HasAtomics] in { -def I32_EXTEND8_S_I32 : I<(outs I32:$dst), (ins I32:$src), - [(set I32:$dst, (sext_inreg I32:$src, i8))], - "i32.extend8_s\t$dst, $src", 0xc0>; -def I32_EXTEND16_S_I32 : I<(outs I32:$dst), (ins I32:$src), - [(set I32:$dst, (sext_inreg I32:$src, i16))], - "i32.extend16_s\t$dst, $src", 0xc1>; -def I64_EXTEND8_S_I64 : I<(outs I64:$dst), (ins I64:$src), - [(set I64:$dst, (sext_inreg I64:$src, i8))], - "i64.extend8_s\t$dst, $src", 0xc2>; -def I64_EXTEND16_S_I64 : I<(outs I64:$dst), (ins I64:$src), - [(set I64:$dst, (sext_inreg I64:$src, i16))], - "i64.extend16_s\t$dst, $src", 0xc3>; -def I64_EXTEND32_S_I64 : I<(outs I64:$dst), (ins I64:$src), - [(set I64:$dst, (sext_inreg I64:$src, i32))], - "i64.extend32_s\t$dst, $src", 0xc4>; -} // Predicates = [HasAtomics] +let Predicates = [HasSignExt] in { +defm I32_EXTEND8_S_I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins), + [(set I32:$dst, (sext_inreg I32:$src, i8))], + "i32.extend8_s\t$dst, $src", "i32.extend8_s", + 0xc0>; +defm I32_EXTEND16_S_I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins), + [(set I32:$dst, (sext_inreg I32:$src, i16))], + "i32.extend16_s\t$dst, $src", "i32.extend16_s", + 0xc1>; +defm I64_EXTEND8_S_I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins), + [(set I64:$dst, (sext_inreg I64:$src, i8))], + "i64.extend8_s\t$dst, $src", "i64.extend8_s", + 0xc2>; +defm I64_EXTEND16_S_I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins), + [(set I64:$dst, (sext_inreg I64:$src, i16))], + "i64.extend16_s\t$dst, $src", "i64.extend16_s", + 0xc3>; +defm I64_EXTEND32_S_I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins), + [(set I64:$dst, 
(sext_inreg I64:$src, i32))], + "i64.extend32_s\t$dst, $src", "i64.extend32_s", + 0xc4>; +} // Predicates = [HasSignExt] } // defs = [ARGUMENTS] @@ -55,131 +62,161 @@ let Defs = [ARGUMENTS] in { // Conversion from floating point to integer instructions which don't trap on // overflow or invalid. -def I32_TRUNC_S_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), - [(set I32:$dst, (fp_to_sint F32:$src))], - "i32.trunc_s:sat/f32\t$dst, $src", 0xfc00>, - Requires<[HasNontrappingFPToInt]>; -def I32_TRUNC_U_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), - [(set I32:$dst, (fp_to_uint F32:$src))], - "i32.trunc_u:sat/f32\t$dst, $src", 0xfc01>, - Requires<[HasNontrappingFPToInt]>; -def I64_TRUNC_S_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), - [(set I64:$dst, (fp_to_sint F32:$src))], - "i64.trunc_s:sat/f32\t$dst, $src", 0xfc04>, - Requires<[HasNontrappingFPToInt]>; -def I64_TRUNC_U_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), - [(set I64:$dst, (fp_to_uint F32:$src))], - "i64.trunc_u:sat/f32\t$dst, $src", 0xfc05>, - Requires<[HasNontrappingFPToInt]>; -def I32_TRUNC_S_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), - [(set I32:$dst, (fp_to_sint F64:$src))], - "i32.trunc_s:sat/f64\t$dst, $src", 0xfc02>, - Requires<[HasNontrappingFPToInt]>; -def I32_TRUNC_U_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), - [(set I32:$dst, (fp_to_uint F64:$src))], - "i32.trunc_u:sat/f64\t$dst, $src", 0xfc03>, - Requires<[HasNontrappingFPToInt]>; -def I64_TRUNC_S_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), - [(set I64:$dst, (fp_to_sint F64:$src))], - "i64.trunc_s:sat/f64\t$dst, $src", 0xfc06>, - Requires<[HasNontrappingFPToInt]>; -def I64_TRUNC_U_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), - [(set I64:$dst, (fp_to_uint F64:$src))], - "i64.trunc_u:sat/f64\t$dst, $src", 0xfc07>, - Requires<[HasNontrappingFPToInt]>; +defm I32_TRUNC_S_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [(set I32:$dst, (fp_to_sint F32:$src))], + "i32.trunc_s:sat/f32\t$dst, $src", + "i32.trunc_s:sat/f32", 0xfc00>, + Requires<[HasNontrappingFPToInt]>; +defm I32_TRUNC_U_SAT_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [(set I32:$dst, (fp_to_uint F32:$src))], + "i32.trunc_u:sat/f32\t$dst, $src", + "i32.trunc_u:sat/f32", 0xfc01>, + Requires<[HasNontrappingFPToInt]>; +defm I64_TRUNC_S_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins), + [(set I64:$dst, (fp_to_sint F32:$src))], + "i64.trunc_s:sat/f32\t$dst, $src", + "i64.trunc_s:sat/f32", 0xfc04>, + Requires<[HasNontrappingFPToInt]>; +defm I64_TRUNC_U_SAT_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins), + [(set I64:$dst, (fp_to_uint F32:$src))], + "i64.trunc_u:sat/f32\t$dst, $src", + "i64.trunc_u:sat/f32", 0xfc05>, + Requires<[HasNontrappingFPToInt]>; +defm I32_TRUNC_S_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins), + [(set I32:$dst, (fp_to_sint F64:$src))], + "i32.trunc_s:sat/f64\t$dst, $src", + "i32.trunc_s:sat/f64", 0xfc02>, + Requires<[HasNontrappingFPToInt]>; +defm I32_TRUNC_U_SAT_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins), + [(set I32:$dst, (fp_to_uint F64:$src))], + "i32.trunc_u:sat/f64\t$dst, $src", + "i32.trunc_u:sat/f64", 0xfc03>, + Requires<[HasNontrappingFPToInt]>; +defm I64_TRUNC_S_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [(set I64:$dst, (fp_to_sint F64:$src))], + "i64.trunc_s:sat/f64\t$dst, $src", + "i64.trunc_s:sat/f64", 0xfc06>, + Requires<[HasNontrappingFPToInt]>; +defm I64_TRUNC_U_SAT_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [(set I64:$dst, (fp_to_uint F64:$src))], + "i64.trunc_u:sat/f64\t$dst, 
$src", + "i64.trunc_u:sat/f64", 0xfc07>, + Requires<[HasNontrappingFPToInt]>; // Conversion from floating point to integer pseudo-instructions which don't // trap on overflow or invalid. let usesCustomInserter = 1, isCodeGenOnly = 1 in { -def FP_TO_SINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src), - [(set I32:$dst, (fp_to_sint F32:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_UINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src), - [(set I32:$dst, (fp_to_uint F32:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_SINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src), - [(set I64:$dst, (fp_to_sint F32:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_UINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src), - [(set I64:$dst, (fp_to_uint F32:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_SINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src), - [(set I32:$dst, (fp_to_sint F64:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_UINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src), - [(set I32:$dst, (fp_to_uint F64:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_SINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src), - [(set I64:$dst, (fp_to_sint F64:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; -def FP_TO_UINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src), - [(set I64:$dst, (fp_to_uint F64:$src))], "", 0>, - Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_SINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [(set I32:$dst, (fp_to_sint F32:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_UINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [(set I32:$dst, (fp_to_uint F32:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_SINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins), + [(set I64:$dst, (fp_to_sint F32:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_UINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins), + [(set I64:$dst, (fp_to_uint F32:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_SINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins), + [(set I32:$dst, (fp_to_sint F64:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_UINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins), + [(set I32:$dst, (fp_to_uint F64:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_SINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [(set I64:$dst, (fp_to_sint F64:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; +defm FP_TO_UINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [(set I64:$dst, (fp_to_uint F64:$src))], "", "", 0>, + Requires<[NotHasNontrappingFPToInt]>; } // usesCustomInserter, isCodeGenOnly = 1 // Conversion from floating point to integer traps on overflow and invalid. 
let hasSideEffects = 1 in { -def I32_TRUNC_S_F32 : I<(outs I32:$dst), (ins F32:$src), - [], "i32.trunc_s/f32\t$dst, $src", 0xa8>; -def I32_TRUNC_U_F32 : I<(outs I32:$dst), (ins F32:$src), - [], "i32.trunc_u/f32\t$dst, $src", 0xa9>; -def I64_TRUNC_S_F32 : I<(outs I64:$dst), (ins F32:$src), - [], "i64.trunc_s/f32\t$dst, $src", 0xae>; -def I64_TRUNC_U_F32 : I<(outs I64:$dst), (ins F32:$src), - [], "i64.trunc_u/f32\t$dst, $src", 0xaf>; -def I32_TRUNC_S_F64 : I<(outs I32:$dst), (ins F64:$src), - [], "i32.trunc_s/f64\t$dst, $src", 0xaa>; -def I32_TRUNC_U_F64 : I<(outs I32:$dst), (ins F64:$src), - [], "i32.trunc_u/f64\t$dst, $src", 0xab>; -def I64_TRUNC_S_F64 : I<(outs I64:$dst), (ins F64:$src), - [], "i64.trunc_s/f64\t$dst, $src", 0xb0>; -def I64_TRUNC_U_F64 : I<(outs I64:$dst), (ins F64:$src), - [], "i64.trunc_u/f64\t$dst, $src", 0xb1>; +defm I32_TRUNC_S_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [], "i32.trunc_s/f32\t$dst, $src", "i32.trunc_s/f32", + 0xa8>; +defm I32_TRUNC_U_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [], "i32.trunc_u/f32\t$dst, $src", "i32.trunc_u/f32", + 0xa9>; +defm I64_TRUNC_S_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins), + [], "i64.trunc_s/f32\t$dst, $src", "i64.trunc_s/f32", + 0xae>; +defm I64_TRUNC_U_F32 : I<(outs I64:$dst), (ins F32:$src), (outs), (ins), + [], "i64.trunc_u/f32\t$dst, $src", "i64.trunc_u/f32", + 0xaf>; +defm I32_TRUNC_S_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins), + [], "i32.trunc_s/f64\t$dst, $src", "i32.trunc_s/f64", + 0xaa>; +defm I32_TRUNC_U_F64 : I<(outs I32:$dst), (ins F64:$src), (outs), (ins), + [], "i32.trunc_u/f64\t$dst, $src", "i32.trunc_u/f64", + 0xab>; +defm I64_TRUNC_S_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [], "i64.trunc_s/f64\t$dst, $src", "i64.trunc_s/f64", + 0xb0>; +defm I64_TRUNC_U_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [], "i64.trunc_u/f64\t$dst, $src", "i64.trunc_u/f64", + 0xb1>; } // hasSideEffects = 1 -def F32_CONVERT_S_I32 : I<(outs F32:$dst), (ins I32:$src), - [(set F32:$dst, (sint_to_fp I32:$src))], - "f32.convert_s/i32\t$dst, $src", 0xb2>; -def F32_CONVERT_U_I32 : I<(outs F32:$dst), (ins I32:$src), - [(set F32:$dst, (uint_to_fp I32:$src))], - "f32.convert_u/i32\t$dst, $src", 0xb3>; -def F64_CONVERT_S_I32 : I<(outs F64:$dst), (ins I32:$src), - [(set F64:$dst, (sint_to_fp I32:$src))], - "f64.convert_s/i32\t$dst, $src", 0xb7>; -def F64_CONVERT_U_I32 : I<(outs F64:$dst), (ins I32:$src), - [(set F64:$dst, (uint_to_fp I32:$src))], - "f64.convert_u/i32\t$dst, $src", 0xb8>; -def F32_CONVERT_S_I64 : I<(outs F32:$dst), (ins I64:$src), - [(set F32:$dst, (sint_to_fp I64:$src))], - "f32.convert_s/i64\t$dst, $src", 0xb4>; -def F32_CONVERT_U_I64 : I<(outs F32:$dst), (ins I64:$src), - [(set F32:$dst, (uint_to_fp I64:$src))], - "f32.convert_u/i64\t$dst, $src", 0xb5>; -def F64_CONVERT_S_I64 : I<(outs F64:$dst), (ins I64:$src), - [(set F64:$dst, (sint_to_fp I64:$src))], - "f64.convert_s/i64\t$dst, $src", 0xb9>; -def F64_CONVERT_U_I64 : I<(outs F64:$dst), (ins I64:$src), - [(set F64:$dst, (uint_to_fp I64:$src))], - "f64.convert_u/i64\t$dst, $src", 0xba>; +defm F32_CONVERT_S_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins), + [(set F32:$dst, (sint_to_fp I32:$src))], + "f32.convert_s/i32\t$dst, $src", "f32.convert_s/i32", + 0xb2>; +defm F32_CONVERT_U_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins), + [(set F32:$dst, (uint_to_fp I32:$src))], + "f32.convert_u/i32\t$dst, $src", "f32.convert_u/i32", + 0xb3>; +defm F64_CONVERT_S_I32 : I<(outs F64:$dst), 
(ins I32:$src), (outs), (ins), + [(set F64:$dst, (sint_to_fp I32:$src))], + "f64.convert_s/i32\t$dst, $src", "f64.convert_s/i32", + 0xb7>; +defm F64_CONVERT_U_I32 : I<(outs F64:$dst), (ins I32:$src), (outs), (ins), + [(set F64:$dst, (uint_to_fp I32:$src))], + "f64.convert_u/i32\t$dst, $src", "f64.convert_u/i32", + 0xb8>; +defm F32_CONVERT_S_I64 : I<(outs F32:$dst), (ins I64:$src), (outs), (ins), + [(set F32:$dst, (sint_to_fp I64:$src))], + "f32.convert_s/i64\t$dst, $src", "f32.convert_s/i64", + 0xb4>; +defm F32_CONVERT_U_I64 : I<(outs F32:$dst), (ins I64:$src), (outs), (ins), + [(set F32:$dst, (uint_to_fp I64:$src))], + "f32.convert_u/i64\t$dst, $src", "f32.convert_u/i64", + 0xb5>; +defm F64_CONVERT_S_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins), + [(set F64:$dst, (sint_to_fp I64:$src))], + "f64.convert_s/i64\t$dst, $src", "f64.convert_s/i64", + 0xb9>; +defm F64_CONVERT_U_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins), + [(set F64:$dst, (uint_to_fp I64:$src))], + "f64.convert_u/i64\t$dst, $src", "f64.convert_u/i64", + 0xba>; -def F64_PROMOTE_F32 : I<(outs F64:$dst), (ins F32:$src), - [(set F64:$dst, (fpextend F32:$src))], - "f64.promote/f32\t$dst, $src", 0xbb>; -def F32_DEMOTE_F64 : I<(outs F32:$dst), (ins F64:$src), - [(set F32:$dst, (fpround F64:$src))], - "f32.demote/f64\t$dst, $src", 0xb6>; +defm F64_PROMOTE_F32 : I<(outs F64:$dst), (ins F32:$src), (outs), (ins), + [(set F64:$dst, (fpextend F32:$src))], + "f64.promote/f32\t$dst, $src", "f64.promote/f32", + 0xbb>; +defm F32_DEMOTE_F64 : I<(outs F32:$dst), (ins F64:$src), (outs), (ins), + [(set F32:$dst, (fpround F64:$src))], + "f32.demote/f64\t$dst, $src", "f32.demote/f64", + 0xb6>; -def I32_REINTERPRET_F32 : I<(outs I32:$dst), (ins F32:$src), - [(set I32:$dst, (bitconvert F32:$src))], - "i32.reinterpret/f32\t$dst, $src", 0xbc>; -def F32_REINTERPRET_I32 : I<(outs F32:$dst), (ins I32:$src), - [(set F32:$dst, (bitconvert I32:$src))], - "f32.reinterpret/i32\t$dst, $src", 0xbe>; -def I64_REINTERPRET_F64 : I<(outs I64:$dst), (ins F64:$src), - [(set I64:$dst, (bitconvert F64:$src))], - "i64.reinterpret/f64\t$dst, $src", 0xbd>; -def F64_REINTERPRET_I64 : I<(outs F64:$dst), (ins I64:$src), - [(set F64:$dst, (bitconvert I64:$src))], - "f64.reinterpret/i64\t$dst, $src", 0xbf>; +defm I32_REINTERPRET_F32 : I<(outs I32:$dst), (ins F32:$src), (outs), (ins), + [(set I32:$dst, (bitconvert F32:$src))], + "i32.reinterpret/f32\t$dst, $src", + "i32.reinterpret/f32", 0xbc>; +defm F32_REINTERPRET_I32 : I<(outs F32:$dst), (ins I32:$src), (outs), (ins), + [(set F32:$dst, (bitconvert I32:$src))], + "f32.reinterpret/i32\t$dst, $src", + "f32.reinterpret/i32", 0xbe>; +defm I64_REINTERPRET_F64 : I<(outs I64:$dst), (ins F64:$src), (outs), (ins), + [(set I64:$dst, (bitconvert F64:$src))], + "i64.reinterpret/f64\t$dst, $src", + "i64.reinterpret/f64", 0xbd>; +defm F64_REINTERPRET_I64 : I<(outs F64:$dst), (ins I64:$src), (outs), (ins), + [(set F64:$dst, (bitconvert I64:$src))], + "f64.reinterpret/i64\t$dst, $src", + "f64.reinterpret/i64", 0xbf>; } // Defs = [ARGUMENTS] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td new file mode 100644 index 000000000000..41b39f69e51c --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrExceptRef.td @@ -0,0 +1,31 @@ +// WebAssemblyInstrExceptRef.td-WebAssembly except_ref codegen --*- tablegen -*- +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois 
Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// WebAssembly except_ref operand code-gen constructs. +/// +//===----------------------------------------------------------------------===// + +let Defs = [ARGUMENTS] in { + +defm SELECT_EXCEPT_REF : I<(outs EXCEPT_REF:$dst), + (ins EXCEPT_REF:$lhs, EXCEPT_REF:$rhs, I32:$cond), + (outs), (ins), + [(set EXCEPT_REF:$dst, + (select I32:$cond, EXCEPT_REF:$lhs, + EXCEPT_REF:$rhs))], + "except_ref.select\t$dst, $lhs, $rhs, $cond", + "except_ref.select", 0x1b>; + +} // Defs = [ARGUMENTS] + +def : Pat<(select (i32 (setne I32:$cond, 0)), EXCEPT_REF:$lhs, EXCEPT_REF:$rhs), + (SELECT_EXCEPT_REF EXCEPT_REF:$lhs, EXCEPT_REF:$rhs, I32:$cond)>; +def : Pat<(select (i32 (seteq I32:$cond, 0)), EXCEPT_REF:$lhs, EXCEPT_REF:$rhs), + (SELECT_EXCEPT_REF EXCEPT_REF:$rhs, EXCEPT_REF:$lhs, I32:$cond)>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td index 03c9c1f8d5c0..8db75d38942b 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Floating-point operand code-gen constructs. +/// WebAssembly Floating-point operand code-gen constructs. /// //===----------------------------------------------------------------------===// @@ -77,12 +77,14 @@ def : Pat<(setge f64:$lhs, f64:$rhs), (GE_F64 f64:$lhs, f64:$rhs)>; let Defs = [ARGUMENTS] in { -def SELECT_F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs, I32:$cond), - [(set F32:$dst, (select I32:$cond, F32:$lhs, F32:$rhs))], - "f32.select\t$dst, $lhs, $rhs, $cond", 0x1b>; -def SELECT_F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs, I32:$cond), - [(set F64:$dst, (select I32:$cond, F64:$lhs, F64:$rhs))], - "f64.select\t$dst, $lhs, $rhs, $cond", 0x1b>; +defm SELECT_F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs, I32:$cond), + (outs), (ins), + [(set F32:$dst, (select I32:$cond, F32:$lhs, F32:$rhs))], + "f32.select\t$dst, $lhs, $rhs, $cond", "f32.select", 0x1b>; +defm SELECT_F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs, I32:$cond), + (outs), (ins), + [(set F64:$dst, (select I32:$cond, F64:$lhs, F64:$rhs))], + "f64.select\t$dst, $lhs, $rhs, $cond", "f64.select", 0x1b>; } // Defs = [ARGUMENTS] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td index 4f41fcc232e9..403152c80660 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td @@ -8,99 +8,160 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly instruction format definitions. +/// WebAssembly instruction format definitions. /// //===----------------------------------------------------------------------===// // WebAssembly Instruction Format. -class WebAssemblyInst<bits<32> inst, string asmstr> : Instruction { +// We instantiate 2 of these for every actual instruction (register based +// and stack based), see below. +class WebAssemblyInst<bits<32> inst, string asmstr, bit stack> : Instruction { field bits<32> Inst = inst; // Instruction encoding. 
+ field bit StackBased = stack; let Namespace = "WebAssembly"; let Pattern = []; let AsmString = asmstr; } -// Normal instructions. -class I<dag oops, dag iops, list<dag> pattern, string asmstr = "", bits<32> inst = -1> - : WebAssemblyInst<inst, asmstr> { +// Normal instructions. Default instantiation of a WebAssemblyInst. +class NI<dag oops, dag iops, list<dag> pattern, bit stack, string asmstr = "", + bits<32> inst = -1> + : WebAssemblyInst<inst, asmstr, stack> { dag OutOperandList = oops; dag InOperandList = iops; let Pattern = pattern; } -class SIMD_I<dag oops, dag iops, list<dag> pattern, - string asmstr = "", bits<32> inst = -1> - : I<oops, iops, pattern, asmstr, inst>, Requires<[HasSIMD128]>; +// Generates both register and stack based versions of one actual instruction. +// We have 2 sets of operands (oops & iops) for the register and stack +// based version of this instruction, as well as the corresponding asmstr. +// The register versions have virtual-register operands which correspond to wasm +// locals or stack locations. Each use and def of the register corresponds to an +// implicit get_local / set_local or access of stack operands in wasm. These +// instructions are used for ISel and all MI passes. The stack versions of the +// instructions do not have register operands (they implicitly operate on the +// stack), and get_locals and set_locals are explicit. The register instructions +// are converted to their corresponding stack instructions before lowering to +// MC. +// Every instruction should want to be based on this multi-class to guarantee +// there is always an equivalent pair of instructions. +multiclass I<dag oops_r, dag iops_r, dag oops_s, dag iops_s, + list<dag> pattern_r, string asmstr_r = "", string asmstr_s = "", + bits<32> inst = -1> { + def "" : NI<oops_r, iops_r, pattern_r, 0, asmstr_r, inst>; + def _S : NI<oops_s, iops_s, [], 1, asmstr_s, inst>; +} + +// For instructions that have no register ops, so both sets are the same. +multiclass NRI<dag oops, dag iops, list<dag> pattern, string asmstr = "", + bits<32> inst = -1> { + defm "": I<oops, iops, oops, iops, pattern, asmstr, asmstr, inst>; +} -class ATOMIC_I<dag oops, dag iops, list<dag> pattern, - string asmstr = "", bits<32> inst = -1> - : I<oops, iops, pattern, asmstr, inst>, Requires<[HasAtomics]>; +multiclass SIMD_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s, + list<dag> pattern_r, string asmstr_r = "", + string asmstr_s = "", bits<32> inst = -1> { + defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s, + inst>, + Requires<[HasSIMD128]>; +} + +multiclass ATOMIC_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s, + list<dag> pattern_r, string asmstr_r = "", + string asmstr_s = "", bits<32> inst = -1> { + defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s, + inst>, + Requires<[HasAtomics]>; +} // Unary and binary instructions, for the local types that WebAssembly supports. 
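For instructions with no register operands, NRI above is only shorthand; for example (using the NOP definition from WebAssemblyInstrControl.td in this patch),

  defm NOP : NRI<(outs), (ins), [], "nop", 0x01>;

is equivalent to

  defm NOP : I<(outs), (ins), (outs), (ins), [], "nop", "nop", 0x01>;

and therefore yields NOP (StackBased = 0) and NOP_S (StackBased = 1) with identical operand lists and asm strings.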
-multiclass UnaryInt<SDNode node, string name, bits<32> i32Inst, bits<32> i64Inst> { - def _I32 : I<(outs I32:$dst), (ins I32:$src), - [(set I32:$dst, (node I32:$src))], - !strconcat("i32.", !strconcat(name, "\t$dst, $src")), i32Inst>; - def _I64 : I<(outs I64:$dst), (ins I64:$src), - [(set I64:$dst, (node I64:$src))], - !strconcat("i64.", !strconcat(name, "\t$dst, $src")), i64Inst>; +multiclass UnaryInt<SDNode node, string name, bits<32> i32Inst, + bits<32> i64Inst> { + defm _I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins), + [(set I32:$dst, (node I32:$src))], + !strconcat("i32.", !strconcat(name, "\t$dst, $src")), + !strconcat("i32.", name), i32Inst>; + defm _I64 : I<(outs I64:$dst), (ins I64:$src), (outs), (ins), + [(set I64:$dst, (node I64:$src))], + !strconcat("i64.", !strconcat(name, "\t$dst, $src")), + !strconcat("i64.", name), i64Inst>; } -multiclass BinaryInt<SDNode node, string name, bits<32> i32Inst, bits<32> i64Inst> { - def _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs), - [(set I32:$dst, (node I32:$lhs, I32:$rhs))], - !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")), i32Inst>; - def _I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs), - [(set I64:$dst, (node I64:$lhs, I64:$rhs))], - !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")), i64Inst>; +multiclass BinaryInt<SDNode node, string name, bits<32> i32Inst, + bits<32> i64Inst> { + defm _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs), (outs), (ins), + [(set I32:$dst, (node I32:$lhs, I32:$rhs))], + !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i32.", name), i32Inst>; + defm _I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs), (outs), (ins), + [(set I64:$dst, (node I64:$lhs, I64:$rhs))], + !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i64.", name), i64Inst>; } -multiclass UnaryFP<SDNode node, string name, bits<32> f32Inst, bits<32> f64Inst> { - def _F32 : I<(outs F32:$dst), (ins F32:$src), - [(set F32:$dst, (node F32:$src))], - !strconcat("f32.", !strconcat(name, "\t$dst, $src")), f32Inst>; - def _F64 : I<(outs F64:$dst), (ins F64:$src), - [(set F64:$dst, (node F64:$src))], - !strconcat("f64.", !strconcat(name, "\t$dst, $src")), f64Inst>; +multiclass UnaryFP<SDNode node, string name, bits<32> f32Inst, + bits<32> f64Inst> { + defm _F32 : I<(outs F32:$dst), (ins F32:$src), (outs), (ins), + [(set F32:$dst, (node F32:$src))], + !strconcat("f32.", !strconcat(name, "\t$dst, $src")), + !strconcat("f32.", name), f32Inst>; + defm _F64 : I<(outs F64:$dst), (ins F64:$src), (outs), (ins), + [(set F64:$dst, (node F64:$src))], + !strconcat("f64.", !strconcat(name, "\t$dst, $src")), + !strconcat("f64.", name), f64Inst>; } -multiclass BinaryFP<SDNode node, string name, bits<32> f32Inst, bits<32> f64Inst> { - def _F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs), - [(set F32:$dst, (node F32:$lhs, F32:$rhs))], - !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")), f32Inst>; - def _F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs), - [(set F64:$dst, (node F64:$lhs, F64:$rhs))], - !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")), f64Inst>; +multiclass BinaryFP<SDNode node, string name, bits<32> f32Inst, + bits<32> f64Inst> { + defm _F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs), (outs), (ins), + [(set F32:$dst, (node F32:$lhs, F32:$rhs))], + !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("f32.", name), f32Inst>; + defm _F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs), (outs), (ins), + [(set F64:$dst, (node 
F64:$lhs, F64:$rhs))], + !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("f64.", name), f64Inst>; } multiclass SIMDBinary<SDNode node, SDNode fnode, string name> { - def _I8x16 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), - [(set (v16i8 V128:$dst), (node V128:$lhs, V128:$rhs))], - !strconcat("i8x16.", !strconcat(name, "\t$dst, $lhs, $rhs"))>; - def _I16x8 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), - [(set (v8i16 V128:$dst), (node V128:$lhs, V128:$rhs))], - !strconcat("i16x8.", !strconcat(name, "\t$dst, $lhs, $rhs"))>; - def _I32x4 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), - [(set (v4i32 V128:$dst), (node V128:$lhs, V128:$rhs))], - !strconcat("i32x4.", !strconcat(name, "\t$dst, $lhs, $rhs"))>; - def _F32x4 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), - [(set (v4f32 V128:$dst), (fnode V128:$lhs, V128:$rhs))], - !strconcat("f32x4.", !strconcat(name, "\t$dst, $lhs, $rhs"))>; - + defm _I8x16 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), + [(set (v16i8 V128:$dst), (node V128:$lhs, V128:$rhs))], + !strconcat("i8x16.", + !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i8x16.", name)>; + defm _I16x8 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), + [(set (v8i16 V128:$dst), (node V128:$lhs, V128:$rhs))], + !strconcat("i16x8.", + !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i16x8.", name)>; + defm _I32x4 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), + [(set (v4i32 V128:$dst), (node V128:$lhs, V128:$rhs))], + !strconcat("i32x4.", + !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i32x4.", name)>; + defm _F32x4 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), + (outs), (ins), + [(set (v4f32 V128:$dst), (fnode V128:$lhs, V128:$rhs))], + !strconcat("f32x4.", + !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("f32x4.", name)>; } multiclass ComparisonInt<CondCode cond, string name, bits<32> i32Inst, bits<32> i64Inst> { - def _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs), - [(set I32:$dst, (setcc I32:$lhs, I32:$rhs, cond))], - !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")), - i32Inst>; - def _I64 : I<(outs I32:$dst), (ins I64:$lhs, I64:$rhs), - [(set I32:$dst, (setcc I64:$lhs, I64:$rhs, cond))], - !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")), - i64Inst>; + defm _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs), (outs), (ins), + [(set I32:$dst, (setcc I32:$lhs, I32:$rhs, cond))], + !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i32.", name), i32Inst>; + defm _I64 : I<(outs I32:$dst), (ins I64:$lhs, I64:$rhs), (outs), (ins), + [(set I32:$dst, (setcc I64:$lhs, I64:$rhs, cond))], + !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("i64.", name), i64Inst>; } multiclass ComparisonFP<CondCode cond, string name, bits<32> f32Inst, bits<32> f64Inst> { - def _F32 : I<(outs I32:$dst), (ins F32:$lhs, F32:$rhs), - [(set I32:$dst, (setcc F32:$lhs, F32:$rhs, cond))], - !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")), - f32Inst>; - def _F64 : I<(outs I32:$dst), (ins F64:$lhs, F64:$rhs), - [(set I32:$dst, (setcc F64:$lhs, F64:$rhs, cond))], - !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")), - f64Inst>; + defm _F32 : I<(outs I32:$dst), (ins F32:$lhs, F32:$rhs), (outs), (ins), + [(set I32:$dst, (setcc F32:$lhs, F32:$rhs, cond))], + !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("f32.", name), f32Inst>; + defm _F64 : 
I<(outs I32:$dst), (ins F64:$lhs, F64:$rhs), (outs), (ins), + [(set I32:$dst, (setcc F64:$lhs, F64:$rhs, cond))], + !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")), + !strconcat("f64.", name), f64Inst>; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp index 8846952e5af4..cd49bd1682ad 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// TargetInstrInfo class. /// //===----------------------------------------------------------------------===// @@ -30,7 +30,8 @@ using namespace llvm; WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI) : WebAssemblyGenInstrInfo(WebAssembly::ADJCALLSTACKDOWN, - WebAssembly::ADJCALLSTACKUP), + WebAssembly::ADJCALLSTACKUP, + WebAssembly::CATCHRET), RI(STI.getTargetTriple()) {} bool WebAssemblyInstrInfo::isReallyTriviallyReMaterializable( @@ -151,7 +152,7 @@ unsigned WebAssemblyInstrInfo::removeBranch(MachineBasicBlock &MBB, while (I != MBB.instr_begin()) { --I; - if (I->isDebugValue()) + if (I->isDebugInstr()) continue; if (!I->isTerminator()) break; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h index eb74106336ed..4a3763c345b0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// TargetInstrInfo class. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td index f8d311ac3b00..aeb282a7febb 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Instruction definitions. +/// WebAssembly Instruction definitions. /// //===----------------------------------------------------------------------===// @@ -30,6 +30,24 @@ def NotHasNontrappingFPToInt : Predicate<"!Subtarget->hasNontrappingFPToInt()">, AssemblerPredicate<"!FeatureNontrappingFPToInt", "nontrapping-fptoint">; +def HasSignExt : + Predicate<"Subtarget->hasSignExt()">, + AssemblerPredicate<"FeatureSignExt", + "sign-ext">; +def NotHasSignExt : + Predicate<"!Subtarget->hasSignExt()">, + AssemblerPredicate<"!FeatureSignExt", + "sign-ext">; + +def HasExceptionHandling : + Predicate<"Subtarget->hasExceptionHandling()">, + AssemblerPredicate<"FeatureExceptionHandling", + "exception-handling">; + +def NotHasExceptionHandling : + Predicate<"!Subtarget->hasExceptionHandling()">, + AssemblerPredicate<"!FeatureExceptionHandling", + "exception-handling">; //===----------------------------------------------------------------------===// // WebAssembly-specific DAG Node Types. 
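The new predicates above follow a fixed pattern: a Predicate keyed off a Subtarget->hasX() query paired with an AssemblerPredicate naming the corresponding subtarget feature string. A sketch of how another such gate would be declared, with an invented feature name that is not part of the patch:

  def HasExampleFeature :
      Predicate<"Subtarget->hasExampleFeature()">,
      AssemblerPredicate<"FeatureExampleFeature", "example-feature">;

Instructions then opt in either with Requires<[HasExampleFeature]> on a single record or with let Predicates = [HasExampleFeature] around a block of defs, as the sign-extension and exception-handling instructions in this patch do.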
@@ -135,23 +153,26 @@ include "WebAssemblyInstrFormats.td" multiclass ARGUMENT<WebAssemblyRegClass vt> { let hasSideEffects = 1, Uses = [ARGUMENTS], isCodeGenOnly = 1 in - def ARGUMENT_#vt : I<(outs vt:$res), (ins i32imm:$argno), - [(set vt:$res, (WebAssemblyargument timm:$argno))]>; + defm ARGUMENT_#vt : I<(outs vt:$res), (ins i32imm:$argno), + (outs), (ins i32imm:$argno), + [(set vt:$res, (WebAssemblyargument timm:$argno))]>; } multiclass SIMD_ARGUMENT<ValueType vt> { let hasSideEffects = 1, Uses = [ARGUMENTS], isCodeGenOnly = 1 in - def ARGUMENT_#vt : SIMD_I<(outs V128:$res), (ins i32imm:$argno), - [(set (vt V128:$res), + defm ARGUMENT_#vt : SIMD_I<(outs V128:$res), (ins i32imm:$argno), + (outs), (ins i32imm:$argno), + [(set (vt V128:$res), (WebAssemblyargument timm:$argno))]>; } -defm : ARGUMENT<I32>; -defm : ARGUMENT<I64>; -defm : ARGUMENT<F32>; -defm : ARGUMENT<F64>; -defm : SIMD_ARGUMENT<v16i8>; -defm : SIMD_ARGUMENT<v8i16>; -defm : SIMD_ARGUMENT<v4i32>; -defm : SIMD_ARGUMENT<v4f32>; +defm "": ARGUMENT<I32>; +defm "": ARGUMENT<I64>; +defm "": ARGUMENT<F32>; +defm "": ARGUMENT<F64>; +defm "": ARGUMENT<EXCEPT_REF>; +defm "": SIMD_ARGUMENT<v16i8>; +defm "": SIMD_ARGUMENT<v8i16>; +defm "": SIMD_ARGUMENT<v4i32>; +defm "": SIMD_ARGUMENT<v4f32>; let Defs = [ARGUMENTS] in { @@ -165,69 +186,83 @@ let hasSideEffects = 0 in { // and set_local. COPYs are eliminated (and replaced with // get_local/set_local) in the ExplicitLocals pass. let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in - def COPY_#vt : I<(outs vt:$res), (ins vt:$src), [], "copy_local\t$res, $src">; + defm COPY_#vt : I<(outs vt:$res), (ins vt:$src), (outs), (ins), [], + "copy_local\t$res, $src", "copy_local">; // TEE is similar to COPY, but writes two copies of its result. Typically // this would be used to stackify one result and write the other result to a // local. let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in - def TEE_#vt : I<(outs vt:$res, vt:$also), (ins vt:$src), [], - "tee_local\t$res, $also, $src">; + defm TEE_#vt : I<(outs vt:$res, vt:$also), (ins vt:$src), (outs), (ins), [], + "tee_local\t$res, $also, $src", "tee_local">; // This is the actual get_local instruction in wasm. These are made explicit // by the ExplicitLocals pass. It has mayLoad because it reads from a wasm // local, which is a side effect not otherwise modeled in LLVM. let mayLoad = 1, isAsCheapAsAMove = 1 in - def GET_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local), [], - "get_local\t$res, $local", 0x20>; + defm GET_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local), + (outs), (ins local_op:$local), [], + "get_local\t$res, $local", "get_local\t$local", 0x20>; // This is the actual set_local instruction in wasm. These are made explicit // by the ExplicitLocals pass. It has mayStore because it writes to a wasm // local, which is a side effect not otherwise modeled in LLVM. let mayStore = 1, isAsCheapAsAMove = 1 in - def SET_LOCAL_#vt : I<(outs), (ins local_op:$local, vt:$src), [], - "set_local\t$local, $src", 0x21>; + defm SET_LOCAL_#vt : I<(outs), (ins local_op:$local, vt:$src), + (outs), (ins local_op:$local), [], + "set_local\t$local, $src", "set_local\t$local", 0x21>; // This is the actual tee_local instruction in wasm. TEEs are turned into // TEE_LOCALs by the ExplicitLocals pass. It has mayStore for the same reason // as SET_LOCAL. 
let mayStore = 1, isAsCheapAsAMove = 1 in - def TEE_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local, vt:$src), [], - "tee_local\t$res, $local, $src", 0x22>; + defm TEE_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local, vt:$src), + (outs), (ins local_op:$local), [], + "tee_local\t$res, $local, $src", "tee_local\t$local", + 0x22>; // Unused values must be dropped in some contexts. - def DROP_#vt : I<(outs), (ins vt:$src), [], - "drop\t$src", 0x1a>; + defm DROP_#vt : I<(outs), (ins vt:$src), (outs), (ins), [], + "drop\t$src", "drop", 0x1a>; let mayLoad = 1 in - def GET_GLOBAL_#vt : I<(outs vt:$res), (ins global_op:$local), [], - "get_global\t$res, $local", 0x23>; + defm GET_GLOBAL_#vt : I<(outs vt:$res), (ins global_op:$local), + (outs), (ins global_op:$local), [], + "get_global\t$res, $local", "get_global\t$local", + 0x23>; let mayStore = 1 in - def SET_GLOBAL_#vt : I<(outs), (ins global_op:$local, vt:$src), [], - "set_global\t$local, $src", 0x24>; + defm SET_GLOBAL_#vt : I<(outs), (ins global_op:$local, vt:$src), + (outs), (ins global_op:$local), [], + "set_global\t$local, $src", "set_global\t$local", + 0x24>; } // hasSideEffects = 0 } -defm : LOCAL<I32>; -defm : LOCAL<I64>; -defm : LOCAL<F32>; -defm : LOCAL<F64>; -defm : LOCAL<V128>, Requires<[HasSIMD128]>; +defm "" : LOCAL<I32>; +defm "" : LOCAL<I64>; +defm "" : LOCAL<F32>; +defm "" : LOCAL<F64>; +defm "" : LOCAL<V128>, Requires<[HasSIMD128]>; +defm "" : LOCAL<EXCEPT_REF>, Requires<[HasExceptionHandling]>; let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 in { -def CONST_I32 : I<(outs I32:$res), (ins i32imm_op:$imm), - [(set I32:$res, imm:$imm)], - "i32.const\t$res, $imm", 0x41>; -def CONST_I64 : I<(outs I64:$res), (ins i64imm_op:$imm), - [(set I64:$res, imm:$imm)], - "i64.const\t$res, $imm", 0x42>; -def CONST_F32 : I<(outs F32:$res), (ins f32imm_op:$imm), - [(set F32:$res, fpimm:$imm)], - "f32.const\t$res, $imm", 0x43>; -def CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm), - [(set F64:$res, fpimm:$imm)], - "f64.const\t$res, $imm", 0x44>; +defm CONST_I32 : I<(outs I32:$res), (ins i32imm_op:$imm), + (outs), (ins i32imm_op:$imm), + [(set I32:$res, imm:$imm)], + "i32.const\t$res, $imm", "i32.const\t$imm", 0x41>; +defm CONST_I64 : I<(outs I64:$res), (ins i64imm_op:$imm), + (outs), (ins i64imm_op:$imm), + [(set I64:$res, imm:$imm)], + "i64.const\t$res, $imm", "i64.const\t$imm", 0x42>; +defm CONST_F32 : I<(outs F32:$res), (ins f32imm_op:$imm), + (outs), (ins f32imm_op:$imm), + [(set F32:$res, fpimm:$imm)], + "f32.const\t$res, $imm", "f32.const\t$imm", 0x43>; +defm CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm), + (outs), (ins f64imm_op:$imm), + [(set F64:$res, fpimm:$imm)], + "f64.const\t$res, $imm", "f64.const\t$imm", 0x44>; } // isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 } // Defs = [ARGUMENTS] @@ -249,3 +284,4 @@ include "WebAssemblyInstrConv.td" include "WebAssemblyInstrFloat.td" include "WebAssemblyInstrAtomics.td" include "WebAssemblyInstrSIMD.td" +include "WebAssemblyInstrExceptRef.td" diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td index e872dc219846..f9f21fd1d754 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Integer operand code-gen constructs. 
+/// WebAssembly Integer operand code-gen constructs. /// //===----------------------------------------------------------------------===// @@ -56,12 +56,12 @@ defm CLZ : UnaryInt<ctlz, "clz ", 0x67, 0x79>; defm CTZ : UnaryInt<cttz, "ctz ", 0x68, 0x7a>; defm POPCNT : UnaryInt<ctpop, "popcnt", 0x69, 0x7b>; -def EQZ_I32 : I<(outs I32:$dst), (ins I32:$src), - [(set I32:$dst, (setcc I32:$src, 0, SETEQ))], - "i32.eqz \t$dst, $src", 0x45>; -def EQZ_I64 : I<(outs I32:$dst), (ins I64:$src), - [(set I32:$dst, (setcc I64:$src, 0, SETEQ))], - "i64.eqz \t$dst, $src", 0x50>; +defm EQZ_I32 : I<(outs I32:$dst), (ins I32:$src), (outs), (ins), + [(set I32:$dst, (setcc I32:$src, 0, SETEQ))], + "i32.eqz \t$dst, $src", "i32.eqz", 0x45>; +defm EQZ_I64 : I<(outs I32:$dst), (ins I64:$src), (outs), (ins), + [(set I32:$dst, (setcc I64:$src, 0, SETEQ))], + "i64.eqz \t$dst, $src", "i64.eqz", 0x50>; } // Defs = [ARGUMENTS] @@ -73,12 +73,14 @@ def : Pat<(rotr I64:$lhs, (and I64:$rhs, 63)), (ROTR_I64 I64:$lhs, I64:$rhs)>; let Defs = [ARGUMENTS] in { -def SELECT_I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs, I32:$cond), - [(set I32:$dst, (select I32:$cond, I32:$lhs, I32:$rhs))], - "i32.select\t$dst, $lhs, $rhs, $cond", 0x1b>; -def SELECT_I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs, I32:$cond), - [(set I64:$dst, (select I32:$cond, I64:$lhs, I64:$rhs))], - "i64.select\t$dst, $lhs, $rhs, $cond", 0x1b>; +defm SELECT_I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs, I32:$cond), + (outs), (ins), + [(set I32:$dst, (select I32:$cond, I32:$lhs, I32:$rhs))], + "i32.select\t$dst, $lhs, $rhs, $cond", "i32.select", 0x1b>; +defm SELECT_I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs, I32:$cond), + (outs), (ins), + [(set I64:$dst, (select I32:$cond, I64:$lhs, I64:$rhs))], + "i64.select\t$dst, $lhs, $rhs, $cond", "i64.select", 0x1b>; } // Defs = [ARGUMENTS] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td index 9d58895ca5a6..8a49325af2bd 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly Memory operand code-gen constructs. +/// WebAssembly Memory operand code-gen constructs. /// //===----------------------------------------------------------------------===// @@ -56,24 +56,27 @@ def regPlusGA : PatFrag<(ops node:$addr, node:$off), let Defs = [ARGUMENTS] in { // Defines atomic and non-atomic loads, regular and extending. -class WebAssemblyLoad<WebAssemblyRegClass rc, string Name, int Opcode> : - I<(outs rc:$dst), - (ins P2Align:$p2align, offset32_op:$off, I32:$addr), - [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"), Opcode>; +multiclass WebAssemblyLoad<WebAssemblyRegClass rc, string Name, int Opcode> { + defm "": I<(outs rc:$dst), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr), + (outs), (ins P2Align:$p2align, offset32_op:$off), + [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"), + !strconcat(Name, "\t${off}, ${p2align}"), Opcode>; +} // Basic load. // FIXME: When we can break syntax compatibility, reorder the fields in the // asmstrings to match the binary encoding. 
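To see what the WebAssemblyLoad multiclass above produces, substituting rc = I32, Name = "i32.load", Opcode = 0x28 for the LOAD_I32 definition just below gives (a sketch of the expansion, not literal patch content):

  defm LOAD_I32 : I<(outs I32:$dst),
                    (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
                    (outs), (ins P2Align:$p2align, offset32_op:$off),
                    [], "i32.load\t$dst, ${off}(${addr})${p2align}",
                    "i32.load\t${off}, ${p2align}", 0x28>;

so the register form prints the destination and address register, while the stack form prints only the offset and alignment immediates.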
-def LOAD_I32 : WebAssemblyLoad<I32, "i32.load", 0x28>; -def LOAD_I64 : WebAssemblyLoad<I64, "i64.load", 0x29>; -def LOAD_F32 : WebAssemblyLoad<F32, "f32.load", 0x2a>; -def LOAD_F64 : WebAssemblyLoad<F64, "f64.load", 0x2b>; +defm LOAD_I32 : WebAssemblyLoad<I32, "i32.load", 0x28>; +defm LOAD_I64 : WebAssemblyLoad<I64, "i64.load", 0x29>; +defm LOAD_F32 : WebAssemblyLoad<F32, "f32.load", 0x2a>; +defm LOAD_F64 : WebAssemblyLoad<F64, "f64.load", 0x2b>; } // Defs = [ARGUMENTS] // Select loads with no constant offset. -class LoadPatNoOffset<ValueType ty, PatFrag node, I inst> : - Pat<(ty (node I32:$addr)), (inst 0, 0, $addr)>; +class LoadPatNoOffset<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind I32:$addr)), (inst 0, 0, I32:$addr)>; def : LoadPatNoOffset<i32, load, LOAD_I32>; def : LoadPatNoOffset<i64, load, LOAD_I64>; @@ -84,9 +87,8 @@ def : LoadPatNoOffset<f64, load, LOAD_F64>; // Select loads with a constant offset. // Pattern with address + immediate offset -class LoadPatImmOff<ValueType ty, PatFrag loadkind, PatFrag operand, I inst> : - Pat<(ty (loadkind (operand I32:$addr, imm:$off))), - (inst 0, imm:$off, $addr)>; +class LoadPatImmOff<ValueType ty, PatFrag kind, PatFrag operand, NI inst> : + Pat<(ty (kind (operand I32:$addr, imm:$off))), (inst 0, imm:$off, I32:$addr)>; def : LoadPatImmOff<i32, load, regPlusImm, LOAD_I32>; def : LoadPatImmOff<i64, load, regPlusImm, LOAD_I64>; @@ -97,18 +99,18 @@ def : LoadPatImmOff<i64, load, or_is_add, LOAD_I64>; def : LoadPatImmOff<f32, load, or_is_add, LOAD_F32>; def : LoadPatImmOff<f64, load, or_is_add, LOAD_F64>; -class LoadPatGlobalAddr<ValueType ty, PatFrag loadkind, I inst> : - Pat<(ty (loadkind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)))), - (inst 0, tglobaladdr:$off, $addr)>; +class LoadPatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)))), + (inst 0, tglobaladdr:$off, I32:$addr)>; def : LoadPatGlobalAddr<i32, load, LOAD_I32>; def : LoadPatGlobalAddr<i64, load, LOAD_I64>; def : LoadPatGlobalAddr<f32, load, LOAD_F32>; def : LoadPatGlobalAddr<f64, load, LOAD_F64>; -class LoadPatExternalSym<ValueType ty, PatFrag loadkind, I inst> : - Pat<(ty (loadkind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))), - (inst 0, texternalsym:$off, $addr)>; +class LoadPatExternalSym<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))), + (inst 0, texternalsym:$off, I32:$addr)>; def : LoadPatExternalSym<i32, load, LOAD_I32>; def : LoadPatExternalSym<i64, load, LOAD_I64>; def : LoadPatExternalSym<f32, load, LOAD_F32>; @@ -116,16 +118,16 @@ def : LoadPatExternalSym<f64, load, LOAD_F64>; // Select loads with just a constant offset. 
-class LoadPatOffsetOnly<ValueType ty, PatFrag loadkind, I inst> : - Pat<(ty (loadkind imm:$off)), (inst 0, imm:$off, (CONST_I32 0))>; +class LoadPatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind imm:$off)), (inst 0, imm:$off, (CONST_I32 0))>; def : LoadPatOffsetOnly<i32, load, LOAD_I32>; def : LoadPatOffsetOnly<i64, load, LOAD_I64>; def : LoadPatOffsetOnly<f32, load, LOAD_F32>; def : LoadPatOffsetOnly<f64, load, LOAD_F64>; -class LoadPatGlobalAddrOffOnly<ValueType ty, PatFrag loadkind, I inst> : - Pat<(ty (loadkind (WebAssemblywrapper tglobaladdr:$off))), +class LoadPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off))), (inst 0, tglobaladdr:$off, (CONST_I32 0))>; def : LoadPatGlobalAddrOffOnly<i32, load, LOAD_I32>; @@ -133,8 +135,8 @@ def : LoadPatGlobalAddrOffOnly<i64, load, LOAD_I64>; def : LoadPatGlobalAddrOffOnly<f32, load, LOAD_F32>; def : LoadPatGlobalAddrOffOnly<f64, load, LOAD_F64>; -class LoadPatExternSymOffOnly<ValueType ty, PatFrag loadkind, I inst> : - Pat<(ty (loadkind (WebAssemblywrapper texternalsym:$off))), +class LoadPatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(ty (kind (WebAssemblywrapper texternalsym:$off))), (inst 0, texternalsym:$off, (CONST_I32 0))>; def : LoadPatExternSymOffOnly<i32, load, LOAD_I32>; def : LoadPatExternSymOffOnly<i64, load, LOAD_I64>; @@ -144,16 +146,16 @@ def : LoadPatExternSymOffOnly<f64, load, LOAD_F64>; let Defs = [ARGUMENTS] in { // Extending load. -def LOAD8_S_I32 : WebAssemblyLoad<I32, "i32.load8_s", 0x2c>; -def LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.load8_u", 0x2d>; -def LOAD16_S_I32 : WebAssemblyLoad<I32, "i32.load16_s", 0x2e>; -def LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.load16_u", 0x2f>; -def LOAD8_S_I64 : WebAssemblyLoad<I64, "i64.load8_s", 0x30>; -def LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.load8_u", 0x31>; -def LOAD16_S_I64 : WebAssemblyLoad<I64, "i64.load16_s", 0x32>; -def LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.load16_u", 0x32>; -def LOAD32_S_I64 : WebAssemblyLoad<I64, "i64.load32_s", 0x34>; -def LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.load32_u", 0x35>; +defm LOAD8_S_I32 : WebAssemblyLoad<I32, "i32.load8_s", 0x2c>; +defm LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.load8_u", 0x2d>; +defm LOAD16_S_I32 : WebAssemblyLoad<I32, "i32.load16_s", 0x2e>; +defm LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.load16_u", 0x2f>; +defm LOAD8_S_I64 : WebAssemblyLoad<I64, "i64.load8_s", 0x30>; +defm LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.load8_u", 0x31>; +defm LOAD16_S_I64 : WebAssemblyLoad<I64, "i64.load16_s", 0x32>; +defm LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.load16_u", 0x33>; +defm LOAD32_S_I64 : WebAssemblyLoad<I64, "i64.load32_s", 0x34>; +defm LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.load32_u", 0x35>; } // Defs = [ARGUMENTS] @@ -303,236 +305,191 @@ def : LoadPatExternSymOffOnly<i64, extloadi32, LOAD32_U_I64>; let Defs = [ARGUMENTS] in { +// Defines atomic and non-atomic stores, regular and truncating +multiclass WebAssemblyStore<WebAssemblyRegClass rc, string Name, int Opcode> { + defm "" : I<(outs), + (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val), + (outs), + (ins P2Align:$p2align, offset32_op:$off), [], + !strconcat(Name, "\t${off}(${addr})${p2align}, $val"), + !strconcat(Name, "\t${off}, ${p2align}"), Opcode>; +} // Basic store. // Note: WebAssembly inverts SelectionDAG's usual operand order. 
-def STORE_I32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I32:$val), [], - "i32.store\t${off}(${addr})${p2align}, $val", 0x36>; -def STORE_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I64:$val), [], - "i64.store\t${off}(${addr})${p2align}, $val", 0x37>; -def STORE_F32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - F32:$val), [], - "f32.store\t${off}(${addr})${p2align}, $val", 0x38>; -def STORE_F64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - F64:$val), [], - "f64.store\t${off}(${addr})${p2align}, $val", 0x39>; +defm STORE_I32 : WebAssemblyStore<I32, "i32.store", 0x36>; +defm STORE_I64 : WebAssemblyStore<I64, "i64.store", 0x37>; +defm STORE_F32 : WebAssemblyStore<F32, "f32.store", 0x38>; +defm STORE_F64 : WebAssemblyStore<F64, "f64.store", 0x39>; } // Defs = [ARGUMENTS] // Select stores with no constant offset. -def : Pat<(store I32:$val, I32:$addr), (STORE_I32 0, 0, I32:$addr, I32:$val)>; -def : Pat<(store I64:$val, I32:$addr), (STORE_I64 0, 0, I32:$addr, I64:$val)>; -def : Pat<(store F32:$val, I32:$addr), (STORE_F32 0, 0, I32:$addr, F32:$val)>; -def : Pat<(store F64:$val, I32:$addr), (STORE_F64 0, 0, I32:$addr, F64:$val)>; +class StorePatNoOffset<ValueType ty, PatFrag node, NI inst> : + Pat<(node ty:$val, I32:$addr), (inst 0, 0, I32:$addr, ty:$val)>; + +def : StorePatNoOffset<i32, store, STORE_I32>; +def : StorePatNoOffset<i64, store, STORE_I64>; +def : StorePatNoOffset<f32, store, STORE_F32>; +def : StorePatNoOffset<f64, store, STORE_F64>; // Select stores with a constant offset. -def : Pat<(store I32:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE_I32 0, imm:$off, I32:$addr, I32:$val)>; -def : Pat<(store I64:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(store F32:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE_F32 0, imm:$off, I32:$addr, F32:$val)>; -def : Pat<(store F64:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE_F64 0, imm:$off, I32:$addr, F64:$val)>; -def : Pat<(store I32:$val, (or_is_add I32:$addr, imm:$off)), - (STORE_I32 0, imm:$off, I32:$addr, I32:$val)>; -def : Pat<(store I64:$val, (or_is_add I32:$addr, imm:$off)), - (STORE_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(store F32:$val, (or_is_add I32:$addr, imm:$off)), - (STORE_F32 0, imm:$off, I32:$addr, F32:$val)>; -def : Pat<(store F64:$val, (or_is_add I32:$addr, imm:$off)), - (STORE_F64 0, imm:$off, I32:$addr, F64:$val)>; -def : Pat<(store I32:$val, (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE_I32 0, tglobaladdr:$off, I32:$addr, I32:$val)>; -def : Pat<(store I64:$val, (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>; -def : Pat<(store F32:$val, (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE_F32 0, tglobaladdr:$off, I32:$addr, F32:$val)>; -def : Pat<(store F64:$val, (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE_F64 0, tglobaladdr:$off, I32:$addr, F64:$val)>; -def : Pat<(store I32:$val, (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE_I32 0, texternalsym:$off, I32:$addr, I32:$val)>; -def : Pat<(store I64:$val, (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE_I64 0, texternalsym:$off, I32:$addr, I64:$val)>; -def : Pat<(store F32:$val, (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE_F32 0, texternalsym:$off, I32:$addr, F32:$val)>; -def : Pat<(store F64:$val, 
(add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE_F64 0, texternalsym:$off, I32:$addr, F64:$val)>; +class StorePatImmOff<ValueType ty, PatFrag kind, PatFrag operand, NI inst> : + Pat<(kind ty:$val, (operand I32:$addr, imm:$off)), + (inst 0, imm:$off, I32:$addr, ty:$val)>; + +def : StorePatImmOff<i32, store, regPlusImm, STORE_I32>; +def : StorePatImmOff<i64, store, regPlusImm, STORE_I64>; +def : StorePatImmOff<f32, store, regPlusImm, STORE_F32>; +def : StorePatImmOff<f64, store, regPlusImm, STORE_F64>; +def : StorePatImmOff<i32, store, or_is_add, STORE_I32>; +def : StorePatImmOff<i64, store, or_is_add, STORE_I64>; +def : StorePatImmOff<f32, store, or_is_add, STORE_F32>; +def : StorePatImmOff<f64, store, or_is_add, STORE_F64>; + +class StorePatGlobalAddr<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind ty:$val, + (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off))), + (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>; +def : StorePatGlobalAddr<i32, store, STORE_I32>; +def : StorePatGlobalAddr<i64, store, STORE_I64>; +def : StorePatGlobalAddr<f32, store, STORE_F32>; +def : StorePatGlobalAddr<f64, store, STORE_F64>; + +class StorePatExternalSym<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind ty:$val, (add I32:$addr, (WebAssemblywrapper texternalsym:$off))), + (inst 0, texternalsym:$off, I32:$addr, ty:$val)>; +def : StorePatExternalSym<i32, store, STORE_I32>; +def : StorePatExternalSym<i64, store, STORE_I64>; +def : StorePatExternalSym<f32, store, STORE_F32>; +def : StorePatExternalSym<f64, store, STORE_F64>; // Select stores with just a constant offset. -def : Pat<(store I32:$val, imm:$off), - (STORE_I32 0, imm:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(store I64:$val, imm:$off), - (STORE_I64 0, imm:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(store F32:$val, imm:$off), - (STORE_F32 0, imm:$off, (CONST_I32 0), F32:$val)>; -def : Pat<(store F64:$val, imm:$off), - (STORE_F64 0, imm:$off, (CONST_I32 0), F64:$val)>; -def : Pat<(store I32:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE_I32 0, tglobaladdr:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(store I64:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(store F32:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE_F32 0, tglobaladdr:$off, (CONST_I32 0), F32:$val)>; -def : Pat<(store F64:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE_F64 0, tglobaladdr:$off, (CONST_I32 0), F64:$val)>; -def : Pat<(store I32:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE_I32 0, texternalsym:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(store I64:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(store F32:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE_F32 0, texternalsym:$off, (CONST_I32 0), F32:$val)>; -def : Pat<(store F64:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE_F64 0, texternalsym:$off, (CONST_I32 0), F64:$val)>; +class StorePatOffsetOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind ty:$val, imm:$off), (inst 0, imm:$off, (CONST_I32 0), ty:$val)>; +def : StorePatOffsetOnly<i32, store, STORE_I32>; +def : StorePatOffsetOnly<i64, store, STORE_I64>; +def : StorePatOffsetOnly<f32, store, STORE_F32>; +def : StorePatOffsetOnly<f64, store, STORE_F64>; + +class StorePatGlobalAddrOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind ty:$val, (WebAssemblywrapper tglobaladdr:$off)), + (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>; +def : 
StorePatGlobalAddrOffOnly<i32, store, STORE_I32>; +def : StorePatGlobalAddrOffOnly<i64, store, STORE_I64>; +def : StorePatGlobalAddrOffOnly<f32, store, STORE_F32>; +def : StorePatGlobalAddrOffOnly<f64, store, STORE_F64>; + +class StorePatExternSymOffOnly<ValueType ty, PatFrag kind, NI inst> : + Pat<(kind ty:$val, (WebAssemblywrapper texternalsym:$off)), + (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>; +def : StorePatExternSymOffOnly<i32, store, STORE_I32>; +def : StorePatExternSymOffOnly<i64, store, STORE_I64>; +def : StorePatExternSymOffOnly<f32, store, STORE_F32>; +def : StorePatExternSymOffOnly<f64, store, STORE_F64>; + let Defs = [ARGUMENTS] in { // Truncating store. -def STORE8_I32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I32:$val), [], - "i32.store8\t${off}(${addr})${p2align}, $val", 0x3a>; -def STORE16_I32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I32:$val), [], - "i32.store16\t${off}(${addr})${p2align}, $val", 0x3b>; -def STORE8_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I64:$val), [], - "i64.store8\t${off}(${addr})${p2align}, $val", 0x3c>; -def STORE16_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I64:$val), [], - "i64.store16\t${off}(${addr})${p2align}, $val", 0x3d>; -def STORE32_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, - I64:$val), [], - "i64.store32\t${off}(${addr})${p2align}, $val", 0x3e>; +defm STORE8_I32 : WebAssemblyStore<I32, "i32.store8", 0x3a>; +defm STORE16_I32 : WebAssemblyStore<I32, "i32.store16", 0x3b>; +defm STORE8_I64 : WebAssemblyStore<I64, "i64.store8", 0x3c>; +defm STORE16_I64 : WebAssemblyStore<I64, "i64.store16", 0x3d>; +defm STORE32_I64 : WebAssemblyStore<I64, "i64.store32", 0x3e>; } // Defs = [ARGUMENTS] // Select truncating stores with no constant offset. -def : Pat<(truncstorei8 I32:$val, I32:$addr), - (STORE8_I32 0, 0, I32:$addr, I32:$val)>; -def : Pat<(truncstorei16 I32:$val, I32:$addr), - (STORE16_I32 0, 0, I32:$addr, I32:$val)>; -def : Pat<(truncstorei8 I64:$val, I32:$addr), - (STORE8_I64 0, 0, I32:$addr, I64:$val)>; -def : Pat<(truncstorei16 I64:$val, I32:$addr), - (STORE16_I64 0, 0, I32:$addr, I64:$val)>; -def : Pat<(truncstorei32 I64:$val, I32:$addr), - (STORE32_I64 0, 0, I32:$addr, I64:$val)>; +def : StorePatNoOffset<i32, truncstorei8, STORE8_I32>; +def : StorePatNoOffset<i32, truncstorei16, STORE16_I32>; +def : StorePatNoOffset<i64, truncstorei8, STORE8_I64>; +def : StorePatNoOffset<i64, truncstorei16, STORE16_I64>; +def : StorePatNoOffset<i64, truncstorei32, STORE32_I64>; // Select truncating stores with a constant offset. 
-def : Pat<(truncstorei8 I32:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE8_I32 0, imm:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei16 I32:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE16_I32 0, imm:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei8 I64:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE8_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei16 I64:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE16_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei32 I64:$val, (regPlusImm I32:$addr, imm:$off)), - (STORE32_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei8 I32:$val, (or_is_add I32:$addr, imm:$off)), - (STORE8_I32 0, imm:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei16 I32:$val, (or_is_add I32:$addr, imm:$off)), - (STORE16_I32 0, imm:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei8 I64:$val, (or_is_add I32:$addr, imm:$off)), - (STORE8_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei16 I64:$val, (or_is_add I32:$addr, imm:$off)), - (STORE16_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei32 I64:$val, (or_is_add I32:$addr, imm:$off)), - (STORE32_I64 0, imm:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei8 I32:$val, - (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE8_I32 0, tglobaladdr:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei16 I32:$val, - (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE16_I32 0, tglobaladdr:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei8 I64:$val, - (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE8_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei16 I64:$val, - (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE16_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei32 I64:$val, - (regPlusGA I32:$addr, - (WebAssemblywrapper tglobaladdr:$off))), - (STORE32_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei8 I32:$val, (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE8_I32 0, texternalsym:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei16 I32:$val, - (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE16_I32 0, texternalsym:$off, I32:$addr, I32:$val)>; -def : Pat<(truncstorei8 I64:$val, - (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE8_I64 0, texternalsym:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei16 I64:$val, - (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE16_I64 0, texternalsym:$off, I32:$addr, I64:$val)>; -def : Pat<(truncstorei32 I64:$val, - (add I32:$addr, - (WebAssemblywrapper texternalsym:$off))), - (STORE32_I64 0, texternalsym:$off, I32:$addr, I64:$val)>; +def : StorePatImmOff<i32, truncstorei8, regPlusImm, STORE8_I32>; +def : StorePatImmOff<i32, truncstorei16, regPlusImm, STORE16_I32>; +def : StorePatImmOff<i64, truncstorei8, regPlusImm, STORE8_I64>; +def : StorePatImmOff<i64, truncstorei16, regPlusImm, STORE16_I64>; +def : StorePatImmOff<i64, truncstorei32, regPlusImm, STORE32_I64>; +def : StorePatImmOff<i32, truncstorei8, or_is_add, STORE8_I32>; +def : StorePatImmOff<i32, truncstorei16, or_is_add, STORE16_I32>; +def : StorePatImmOff<i64, truncstorei8, or_is_add, STORE8_I64>; +def : StorePatImmOff<i64, truncstorei16, or_is_add, STORE16_I64>; +def : StorePatImmOff<i64, truncstorei32, or_is_add, STORE32_I64>; + +def : StorePatGlobalAddr<i32, truncstorei8, STORE8_I32>; +def : StorePatGlobalAddr<i32, 
truncstorei16, STORE16_I32>; +def : StorePatGlobalAddr<i64, truncstorei8, STORE8_I64>; +def : StorePatGlobalAddr<i64, truncstorei16, STORE16_I64>; +def : StorePatGlobalAddr<i64, truncstorei32, STORE32_I64>; +def : StorePatExternalSym<i32, truncstorei8, STORE8_I32>; +def : StorePatExternalSym<i32, truncstorei16, STORE16_I32>; +def : StorePatExternalSym<i64, truncstorei8, STORE8_I64>; +def : StorePatExternalSym<i64, truncstorei16, STORE16_I64>; +def : StorePatExternalSym<i64, truncstorei32, STORE32_I64>; // Select truncating stores with just a constant offset. -def : Pat<(truncstorei8 I32:$val, imm:$off), - (STORE8_I32 0, imm:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(truncstorei16 I32:$val, imm:$off), - (STORE16_I32 0, imm:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(truncstorei8 I64:$val, imm:$off), - (STORE8_I64 0, imm:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei16 I64:$val, imm:$off), - (STORE16_I64 0, imm:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei32 I64:$val, imm:$off), - (STORE32_I64 0, imm:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei8 I32:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE8_I32 0, tglobaladdr:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(truncstorei16 I32:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE16_I32 0, tglobaladdr:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(truncstorei8 I64:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE8_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei16 I64:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE16_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei32 I64:$val, (WebAssemblywrapper tglobaladdr:$off)), - (STORE32_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei8 I32:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE8_I32 0, texternalsym:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(truncstorei16 I32:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE16_I32 0, texternalsym:$off, (CONST_I32 0), I32:$val)>; -def : Pat<(truncstorei8 I64:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE8_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei16 I64:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE16_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>; -def : Pat<(truncstorei32 I64:$val, (WebAssemblywrapper texternalsym:$off)), - (STORE32_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>; +def : StorePatOffsetOnly<i32, truncstorei8, STORE8_I32>; +def : StorePatOffsetOnly<i32, truncstorei16, STORE16_I32>; +def : StorePatOffsetOnly<i64, truncstorei8, STORE8_I64>; +def : StorePatOffsetOnly<i64, truncstorei16, STORE16_I64>; +def : StorePatOffsetOnly<i64, truncstorei32, STORE32_I64>; +def : StorePatGlobalAddrOffOnly<i32, truncstorei8, STORE8_I32>; +def : StorePatGlobalAddrOffOnly<i32, truncstorei16, STORE16_I32>; +def : StorePatGlobalAddrOffOnly<i64, truncstorei8, STORE8_I64>; +def : StorePatGlobalAddrOffOnly<i64, truncstorei16, STORE16_I64>; +def : StorePatGlobalAddrOffOnly<i64, truncstorei32, STORE32_I64>; +def : StorePatExternSymOffOnly<i32, truncstorei8, STORE8_I32>; +def : StorePatExternSymOffOnly<i32, truncstorei16, STORE16_I32>; +def : StorePatExternSymOffOnly<i64, truncstorei8, STORE8_I64>; +def : StorePatExternSymOffOnly<i64, truncstorei16, STORE16_I64>; +def : StorePatExternSymOffOnly<i64, truncstorei32, STORE32_I64>; let Defs = [ARGUMENTS] in { // Current memory size. 
-def CURRENT_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags), - [], - "current_memory\t$dst", 0x3f>, - Requires<[HasAddr32]>; +defm MEMORY_SIZE_I32 : I<(outs I32:$dst), (ins i32imm:$flags), + (outs), (ins i32imm:$flags), + [(set I32:$dst, + (int_wasm_memory_size (i32 imm:$flags)))], + "memory.size\t$dst, $flags", "memory.size\t$flags", + 0x3f>, + Requires<[HasAddr32]>; +defm MEM_SIZE_I32 : I<(outs I32:$dst), (ins i32imm:$flags), + (outs), (ins i32imm:$flags), + [(set I32:$dst, (int_wasm_mem_size (i32 imm:$flags)))], + "mem.size\t$dst, $flags", "mem.size\t$flags", 0x3f>, + Requires<[HasAddr32]>; +defm CURRENT_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags), + (outs), (ins i32imm:$flags), + [], + "current_memory\t$dst", + "current_memory\t$flags", 0x3f>, + Requires<[HasAddr32]>; // Grow memory. -def GROW_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta), - [], - "grow_memory\t$dst, $delta", 0x40>, - Requires<[HasAddr32]>; +defm MEMORY_GROW_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta), + (outs), (ins i32imm:$flags, I32:$delta), + [(set I32:$dst, + (int_wasm_memory_grow (i32 imm:$flags), + I32:$delta))], + "memory.grow\t$dst, $flags, $delta", + "memory.grow\t$flags, $delta", 0x3f>, + Requires<[HasAddr32]>; +defm MEM_GROW_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta), + (outs), (ins i32imm:$flags), + [(set I32:$dst, + (int_wasm_mem_grow (i32 imm:$flags), I32:$delta))], + "mem.grow\t$dst, $flags, $delta", "mem.grow\t$flags", + 0x3f>, + Requires<[HasAddr32]>; +defm GROW_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta), + (outs), (ins i32imm:$flags), + [], + "grow_memory\t$dst, $delta", "grow_memory\t$flags", + 0x40>, + Requires<[HasAddr32]>; } // Defs = [ARGUMENTS] diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index e403534d580a..7d1edccdeb3c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief WebAssembly SIMD operand code-gen constructs. +/// WebAssembly SIMD operand code-gen constructs. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp new file mode 100644 index 000000000000..e42dcbc0a8ac --- /dev/null +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp @@ -0,0 +1,383 @@ +//=== WebAssemblyLateEHPrepare.cpp - WebAssembly Exception Preparation -===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// \brief Does various transformations for exception handling. 
+/// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" +#include "WebAssembly.h" +#include "WebAssemblySubtarget.h" +#include "WebAssemblyUtilities.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/WasmEHFuncInfo.h" +#include "llvm/MC/MCAsmInfo.h" +using namespace llvm; + +#define DEBUG_TYPE "wasm-exception-prepare" + +namespace { +class WebAssemblyLateEHPrepare final : public MachineFunctionPass { + StringRef getPassName() const override { + return "WebAssembly Prepare Exception"; + } + + bool runOnMachineFunction(MachineFunction &MF) override; + + bool replaceFuncletReturns(MachineFunction &MF); + bool hoistCatches(MachineFunction &MF); + bool addCatchAlls(MachineFunction &MF); + bool addRethrows(MachineFunction &MF); + bool ensureSingleBBTermPads(MachineFunction &MF); + bool mergeTerminatePads(MachineFunction &MF); + bool addCatchAllTerminatePads(MachineFunction &MF); + +public: + static char ID; // Pass identification, replacement for typeid + WebAssemblyLateEHPrepare() : MachineFunctionPass(ID) {} +}; +} // end anonymous namespace + +char WebAssemblyLateEHPrepare::ID = 0; +INITIALIZE_PASS(WebAssemblyLateEHPrepare, DEBUG_TYPE, + "WebAssembly Exception Preparation", false, false) + +FunctionPass *llvm::createWebAssemblyLateEHPrepare() { + return new WebAssemblyLateEHPrepare(); +} + +// Returns the nearest EH pad that dominates this instruction. This does not use +// dominator analysis; it just does BFS on its predecessors until arriving at an +// EH pad. This assumes valid EH scopes so the first EH pad it arrives in all +// possible search paths should be the same. +// Returns nullptr in case it does not find any EH pad in the search, or finds +// multiple different EH pads. +MachineBasicBlock *GetMatchingEHPad(MachineInstr *MI) { + MachineFunction *MF = MI->getParent()->getParent(); + SmallVector<MachineBasicBlock *, 2> WL; + SmallPtrSet<MachineBasicBlock *, 2> Visited; + WL.push_back(MI->getParent()); + MachineBasicBlock *EHPad = nullptr; + while (!WL.empty()) { + MachineBasicBlock *MBB = WL.pop_back_val(); + if (Visited.count(MBB)) + continue; + Visited.insert(MBB); + if (MBB->isEHPad()) { + if (EHPad && EHPad != MBB) + return nullptr; + EHPad = MBB; + continue; + } + if (MBB == &MF->front()) + return nullptr; + WL.append(MBB->pred_begin(), MBB->pred_end()); + } + return EHPad; +} + +// Erases the given BB and all its children from the function. If other BBs have +// this BB as a successor, the successor relationships will be deleted as well. 
+static void EraseBBAndChildren(MachineBasicBlock *MBB) { + SmallVector<MachineBasicBlock *, 8> WL; + WL.push_back(MBB); + while (!WL.empty()) { + MachineBasicBlock *MBB = WL.pop_back_val(); + for (auto *Pred : MBB->predecessors()) + Pred->removeSuccessor(MBB); + for (auto *Succ : MBB->successors()) { + WL.push_back(Succ); + MBB->removeSuccessor(Succ); + } + MBB->eraseFromParent(); + } +} + +bool WebAssemblyLateEHPrepare::runOnMachineFunction(MachineFunction &MF) { + if (MF.getTarget().getMCAsmInfo()->getExceptionHandlingType() != + ExceptionHandling::Wasm) + return false; + + bool Changed = false; + Changed |= addRethrows(MF); + if (!MF.getFunction().hasPersonalityFn()) + return Changed; + Changed |= replaceFuncletReturns(MF); + Changed |= hoistCatches(MF); + Changed |= addCatchAlls(MF); + Changed |= ensureSingleBBTermPads(MF); + Changed |= mergeTerminatePads(MF); + Changed |= addCatchAllTerminatePads(MF); + return Changed; +} + +bool WebAssemblyLateEHPrepare::replaceFuncletReturns(MachineFunction &MF) { + bool Changed = false; + const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + auto *EHInfo = MF.getWasmEHFuncInfo(); + + for (auto &MBB : MF) { + auto Pos = MBB.getFirstTerminator(); + if (Pos == MBB.end()) + continue; + MachineInstr *TI = &*Pos; + + switch (TI->getOpcode()) { + case WebAssembly::CATCHRET: { + // Replace a catchret with a branch + MachineBasicBlock *TBB = TI->getOperand(0).getMBB(); + if (!MBB.isLayoutSuccessor(TBB)) + BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::BR)) + .addMBB(TBB); + TI->eraseFromParent(); + Changed = true; + break; + } + case WebAssembly::CLEANUPRET: { + // Replace a cleanupret with a rethrow + if (EHInfo->hasThrowUnwindDest(&MBB)) + BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::RETHROW)) + .addMBB(EHInfo->getThrowUnwindDest(&MBB)); + else + BuildMI(MBB, TI, TI->getDebugLoc(), + TII.get(WebAssembly::RETHROW_TO_CALLER)); + + TI->eraseFromParent(); + Changed = true; + break; + } + } + } + return Changed; +} + +// Hoist catch instructions to the beginning of their matching EH pad BBs in +// case, +// (1) catch instruction is not the first instruction in EH pad. +// ehpad: +// some_other_instruction +// ... +// %exn = catch 0 +// (2) catch instruction is in a non-EH pad BB. For example, +// ehpad: +// br bb0 +// bb0: +// %exn = catch 0 +bool WebAssemblyLateEHPrepare::hoistCatches(MachineFunction &MF) { + bool Changed = false; + SmallVector<MachineInstr *, 16> Catches; + for (auto &MBB : MF) + for (auto &MI : MBB) + if (WebAssembly::isCatch(MI)) + Catches.push_back(&MI); + + for (auto *Catch : Catches) { + MachineBasicBlock *EHPad = GetMatchingEHPad(Catch); + assert(EHPad && "No matching EH pad for catch"); + if (EHPad->begin() == Catch) + continue; + Changed = true; + EHPad->insert(EHPad->begin(), Catch->removeFromParent()); + } + return Changed; +} + +// Add catch_all to beginning of cleanup pads. +bool WebAssemblyLateEHPrepare::addCatchAlls(MachineFunction &MF) { + bool Changed = false; + const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + + for (auto &MBB : MF) { + if (!MBB.isEHPad()) + continue; + // This runs after hoistCatches(), so we assume that if there is a catch, + // that should be the first instruction in an EH pad. 
+    if (!WebAssembly::isCatch(*MBB.begin())) {
+      Changed = true;
+      BuildMI(MBB, MBB.begin(), MBB.begin()->getDebugLoc(),
+              TII.get(WebAssembly::CATCH_ALL));
+    }
+  }
+  return Changed;
+}
+
+// Add a 'rethrow' instruction after a __cxa_rethrow() call
+bool WebAssemblyLateEHPrepare::addRethrows(MachineFunction &MF) {
+  bool Changed = false;
+  const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+  auto *EHInfo = MF.getWasmEHFuncInfo();
+
+  for (auto &MBB : MF)
+    for (auto &MI : MBB) {
+      // Check if it is a call to __cxa_rethrow()
+      if (!MI.isCall())
+        continue;
+      MachineOperand &CalleeOp = MI.getOperand(0);
+      if (!CalleeOp.isGlobal() ||
+          CalleeOp.getGlobal()->getName() != WebAssembly::CxaRethrowFn)
+        continue;
+
+      // Now we have a __cxa_rethrow() call
+      Changed = true;
+      auto InsertPt = std::next(MachineBasicBlock::iterator(MI));
+      while (InsertPt != MBB.end() && InsertPt->isLabel()) // Skip EH_LABELs
+        ++InsertPt;
+      MachineInstr *Rethrow = nullptr;
+      if (EHInfo->hasThrowUnwindDest(&MBB))
+        Rethrow = BuildMI(MBB, InsertPt, MI.getDebugLoc(),
+                          TII.get(WebAssembly::RETHROW))
+                      .addMBB(EHInfo->getThrowUnwindDest(&MBB));
+      else
+        Rethrow = BuildMI(MBB, InsertPt, MI.getDebugLoc(),
+                          TII.get(WebAssembly::RETHROW_TO_CALLER));
+
+      // Because __cxa_rethrow does not return, the instruction after the
+      // rethrow should be an unreachable or a branch to another BB that should
+      // eventually lead to an unreachable. Delete it because rethrow itself is
+      // a terminator, and also delete non-EH pad successors if any.
+      MBB.erase(std::next(MachineBasicBlock::iterator(Rethrow)), MBB.end());
+      for (auto *Succ : MBB.successors())
+        if (!Succ->isEHPad())
+          EraseBBAndChildren(Succ);
+    }
+  return Changed;
+}
+
+// Terminate pads are single-BB EH pads of the form
+// termpad:
+//   %exn = catch 0
+//   call @__clang_call_terminate(%exn)
+//   unreachable
+// (There can be set_local and get_locals before the call if we didn't run
+// RegStackify)
+// But code transformations can change or add more control flow, so the call to
+// the __clang_call_terminate() function may not be in the original EH pad
+// anymore. This ensures every terminate pad is a single BB in the form
+// illustrated above.
+bool WebAssemblyLateEHPrepare::ensureSingleBBTermPads(MachineFunction &MF) {
+  const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+  // Find calls to __clang_call_terminate()
+  SmallVector<MachineInstr *, 8> ClangCallTerminateCalls;
+  for (auto &MBB : MF)
+    for (auto &MI : MBB)
+      if (MI.isCall()) {
+        const MachineOperand &CalleeOp = MI.getOperand(0);
+        if (CalleeOp.isGlobal() && CalleeOp.getGlobal()->getName() ==
+                                       WebAssembly::ClangCallTerminateFn)
+          ClangCallTerminateCalls.push_back(&MI);
+      }
+
+  bool Changed = false;
+  for (auto *Call : ClangCallTerminateCalls) {
+    MachineBasicBlock *EHPad = GetMatchingEHPad(Call);
+    assert(EHPad && "No matching EH pad for catch");
+
+    // If it is already the form we want, skip it
+    if (Call->getParent() == EHPad &&
+        Call->getNextNode()->getOpcode() == WebAssembly::UNREACHABLE)
+      continue;
+
+    // In case the __clang_call_terminate() call is not in its matching EH pad,
+    // move the call to the end of the EH pad and add an unreachable instruction
+    // after that. Delete all successors and their children if any, because here
+    // the program terminates.
+ Changed = true; + MachineInstr *Catch = &*EHPad->begin(); + // This runs after hoistCatches(), so catch instruction should be at the top + assert(WebAssembly::isCatch(*Catch)); + // Takes the result register of the catch instruction as argument. There may + // have been some other set_local/get_locals in between, but at this point + // we don't care. + Call->getOperand(1).setReg(Catch->getOperand(0).getReg()); + auto InsertPos = std::next(MachineBasicBlock::iterator(Catch)); + EHPad->insert(InsertPos, Call->removeFromParent()); + BuildMI(*EHPad, InsertPos, Call->getDebugLoc(), + TII.get(WebAssembly::UNREACHABLE)); + EHPad->erase(InsertPos, EHPad->end()); + for (auto *Succ : EHPad->successors()) + EraseBBAndChildren(Succ); + } + return Changed; +} + +// In case there are multiple terminate pads, merge them into one for code size. +// This runs after ensureSingleBBTermPads() and assumes every terminate pad is a +// single BB. +// In principle this violates EH scope relationship because it can merge +// multiple inner EH scopes, each of which is in different outer EH scope. But +// getEHScopeMembership() function will not be called after this, so it is fine. +bool WebAssemblyLateEHPrepare::mergeTerminatePads(MachineFunction &MF) { + SmallVector<MachineBasicBlock *, 8> TermPads; + for (auto &MBB : MF) + if (WebAssembly::isCatchTerminatePad(MBB)) + TermPads.push_back(&MBB); + if (TermPads.empty()) + return false; + + MachineBasicBlock *UniqueTermPad = TermPads.front(); + for (auto *TermPad : + llvm::make_range(std::next(TermPads.begin()), TermPads.end())) { + SmallVector<MachineBasicBlock *, 2> Preds(TermPad->pred_begin(), + TermPad->pred_end()); + for (auto *Pred : Preds) + Pred->replaceSuccessor(TermPad, UniqueTermPad); + TermPad->eraseFromParent(); + } + return true; +} + +// Terminate pads are cleanup pads, so they should start with a 'catch_all' +// instruction. But in the Itanium model, when we have a C++ exception object, +// we pass them to __clang_call_terminate function, which calls __cxa_end_catch +// with the passed exception pointer and then std::terminate. This is the reason +// that terminate pads are generated with not a catch_all but a catch +// instruction in clang and earlier llvm passes. Here we append a terminate pad +// with a catch_all after each existing terminate pad so we can also catch +// foreign exceptions. 
For every terminate pad: +// %exn = catch 0 +// call @__clang_call_terminate(%exn) +// unreachable +// We append this BB right after that: +// catch_all +// call @std::terminate() +// unreachable +bool WebAssemblyLateEHPrepare::addCatchAllTerminatePads(MachineFunction &MF) { + const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); + SmallVector<MachineBasicBlock *, 8> TermPads; + for (auto &MBB : MF) + if (WebAssembly::isCatchTerminatePad(MBB)) + TermPads.push_back(&MBB); + if (TermPads.empty()) + return false; + + Function *StdTerminateFn = + MF.getFunction().getParent()->getFunction(WebAssembly::StdTerminateFn); + assert(StdTerminateFn && "There is no std::terminate() function"); + for (auto *CatchTermPad : TermPads) { + DebugLoc DL = CatchTermPad->findDebugLoc(CatchTermPad->begin()); + auto *CatchAllTermPad = MF.CreateMachineBasicBlock(); + MF.insert(std::next(MachineFunction::iterator(CatchTermPad)), + CatchAllTermPad); + CatchAllTermPad->setIsEHPad(); + BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::CATCH_ALL)); + BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::CALL_VOID)) + .addGlobalAddress(StdTerminateFn); + BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::UNREACHABLE)); + + // Actually this CatchAllTermPad (new terminate pad with a catch_all) is not + // a successor of an existing terminate pad. CatchAllTermPad should have all + // predecessors CatchTermPad has instead. This is a hack to force + // CatchAllTermPad be always sorted right after CatchTermPad; the correct + // predecessor-successor relationships will be restored in CFGStackify pass. + CatchTermPad->addSuccessor(CatchAllTermPad); + } + return true; +} diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp index 5b867aa763a1..5fb97e38939a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file lowers br_unless into br_if with an inverted condition. +/// This file lowers br_unless into br_if with an inverted condition. /// /// br_unless is not currently in the spec, but it's very convenient for LLVM /// to use. This pass allows LLVM to use it, for now. 
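The rewrite the pass performs is simple to picture. A minimal sketch, assuming toy instruction records rather than LLVM's MachineInstr/BuildMI machinery (where it can, the real pass also inverts a preceding comparison, e.g. eq into ne, instead of always materializing an eqz):

#include <string>
#include <vector>

// Toy instruction record for illustration; operand order is arbitrary here.
struct WasmInst {
  std::string Opcode;
  std::vector<std::string> Ops;
};

// br_unless %cond, ^dest  ==>  %inv = i32.eqz %cond ; br_if %inv, ^dest
std::vector<WasmInst> lowerBrUnless(const std::string &Cond,
                                    const std::string &Dest) {
  std::string Inv = Cond + ".inv";
  return {
      {"i32.eqz", {Inv, Cond}}, // %inv is 1 exactly when %cond is 0
      {"br_if", {Dest, Inv}},   // so branching on %inv fires when %cond is false
  };
}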
@@ -47,14 +47,17 @@ public: } // end anonymous namespace char WebAssemblyLowerBrUnless::ID = 0; +INITIALIZE_PASS(WebAssemblyLowerBrUnless, DEBUG_TYPE, + "Lowers br_unless into inverted br_if", false, false) + FunctionPass *llvm::createWebAssemblyLowerBrUnless() { return new WebAssemblyLowerBrUnless(); } bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** Lowering br_unless **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Lowering br_unless **********\n" + "********** Function: " + << MF.getName() << '\n'); auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>(); const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp index f0b6a3e35dba..e9cb7c10113b 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file lowers exception-related instructions and setjmp/longjmp +/// This file lowers exception-related instructions and setjmp/longjmp /// function calls in order to use Emscripten's JavaScript try and catch /// mechanism. /// @@ -225,13 +225,8 @@ static cl::list<std::string> namespace { class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass { - static const char *ThrewGVName; - static const char *ThrewValueGVName; - static const char *TempRet0GVName; static const char *ResumeFName; static const char *EHTypeIDFName; - static const char *SetThrewFName; - static const char *SetTempRet0FName; static const char *EmLongjmpFName; static const char *EmLongjmpJmpbufFName; static const char *SaveSetjmpFName; @@ -300,14 +295,9 @@ public: }; } // End anonymous namespace -const char *WebAssemblyLowerEmscriptenEHSjLj::ThrewGVName = "__THREW__"; -const char *WebAssemblyLowerEmscriptenEHSjLj::ThrewValueGVName = "__threwValue"; -const char *WebAssemblyLowerEmscriptenEHSjLj::TempRet0GVName = "__tempRet0"; const char *WebAssemblyLowerEmscriptenEHSjLj::ResumeFName = "__resumeException"; const char *WebAssemblyLowerEmscriptenEHSjLj::EHTypeIDFName = "llvm_eh_typeid_for"; -const char *WebAssemblyLowerEmscriptenEHSjLj::SetThrewFName = "setThrew"; -const char *WebAssemblyLowerEmscriptenEHSjLj::SetTempRet0FName = "setTempRet0"; const char *WebAssemblyLowerEmscriptenEHSjLj::EmLongjmpFName = "emscripten_longjmp"; const char *WebAssemblyLowerEmscriptenEHSjLj::EmLongjmpJmpbufFName = @@ -343,15 +333,13 @@ static bool canThrow(const Value *V) { return true; } -// Returns an available name for a global value. -// If the proposed name already exists in the module, adds '_' at the end of -// the name until the name is available. -static inline std::string createGlobalValueName(const Module &M, - const std::string &Propose) { - std::string Name = Propose; - while (M.getNamedGlobal(Name)) - Name += "_"; - return Name; +static GlobalVariable *createGlobalVariableI32(Module &M, IRBuilder<> &IRB, + const char *Name) { + if (M.getNamedGlobal(Name)) + report_fatal_error(Twine("variable name is reserved: ") + Name); + + return new GlobalVariable(M, IRB.getInt32Ty(), false, + GlobalValue::WeakODRLinkage, IRB.getInt32(0), Name); } // Simple function name mangler. 
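At run time these pieces fit together roughly as follows; this is a hedged sketch in plain C++ of the control flow the lowering emits around a potentially-throwing call, not the pass's IRBuilder code. __THREW__ and __threwValue are the globals created above; invoke_vi stands in for the per-signature invoke_* wrappers Emscripten's JS glue supplies (they run the callee inside a JS try/catch and record any unwind in __THREW__); the exact names and signatures here are illustrative assumptions.

// Supplied by the Emscripten runtime and by the lowering itself; declared here
// only so the sketch forms a complete translation unit.
extern "C" int __THREW__;
extern "C" int __threwValue;
extern "C" void invoke_vi(void (*Callee)(int), int Arg);

void callGuarded(void (*Callee)(int), int Arg) {
  __THREW__ = 0;          // clear the flag before the guarded call
  invoke_vi(Callee, Arg); // call through the JS trampoline
  int Threw = __THREW__;  // nonzero if the call unwound (throw or longjmp)
  __THREW__ = 0;
  if (Threw) {
    // The real lowering branches to landing-pad or longjmp-dispatch code here,
    // consulting __threwValue to tell the cases apart.
    (void)__threwValue;
  }
}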
@@ -613,11 +601,13 @@ void WebAssemblyLowerEmscriptenEHSjLj::createSetThrewFunction(Module &M) { LLVMContext &C = M.getContext(); IRBuilder<> IRB(C); - assert(!M.getNamedGlobal(SetThrewFName) && "setThrew already exists"); + if (M.getNamedGlobal("setThrew")) + report_fatal_error("setThrew already exists"); + Type *Params[] = {IRB.getInt32Ty(), IRB.getInt32Ty()}; FunctionType *FTy = FunctionType::get(IRB.getVoidTy(), Params, false); Function *F = - Function::Create(FTy, GlobalValue::ExternalLinkage, SetThrewFName, &M); + Function::Create(FTy, GlobalValue::WeakODRLinkage, "setThrew", &M); Argument *Arg1 = &*(F->arg_begin()); Argument *Arg2 = &*std::next(F->arg_begin()); Arg1->setName("threw"); @@ -648,11 +638,12 @@ void WebAssemblyLowerEmscriptenEHSjLj::createSetTempRet0Function(Module &M) { LLVMContext &C = M.getContext(); IRBuilder<> IRB(C); - assert(!M.getNamedGlobal(SetTempRet0FName) && "setTempRet0 already exists"); + if (M.getNamedGlobal("setTempRet0")) + report_fatal_error("setTempRet0 already exists"); Type *Params[] = {IRB.getInt32Ty()}; FunctionType *FTy = FunctionType::get(IRB.getVoidTy(), Params, false); Function *F = - Function::Create(FTy, GlobalValue::ExternalLinkage, SetTempRet0FName, &M); + Function::Create(FTy, GlobalValue::WeakODRLinkage, "setTempRet0", &M); F->arg_begin()->setName("value"); BasicBlock *EntryBB = BasicBlock::Create(C, "entry", F); IRB.SetInsertPoint(EntryBB); @@ -699,15 +690,9 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) { // Create global variables __THREW__, threwValue, and __tempRet0, which are // used in common for both exception handling and setjmp/longjmp handling - ThrewGV = new GlobalVariable(M, IRB.getInt32Ty(), false, - GlobalValue::ExternalLinkage, IRB.getInt32(0), - createGlobalValueName(M, ThrewGVName)); - ThrewValueGV = new GlobalVariable( - M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage, IRB.getInt32(0), - createGlobalValueName(M, ThrewValueGVName)); - TempRet0GV = new GlobalVariable(M, IRB.getInt32Ty(), false, - GlobalValue::ExternalLinkage, IRB.getInt32(0), - createGlobalValueName(M, TempRet0GVName)); + ThrewGV = createGlobalVariableI32(M, IRB, "__THREW__"); + ThrewValueGV = createGlobalVariableI32(M, IRB, "__threwValue"); + TempRet0GV = createGlobalVariableI32(M, IRB, "__tempRet0"); bool Changed = false; @@ -736,12 +721,6 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) { if (DoSjLj) { Changed = true; // We have setjmp or longjmp somewhere - Function *MallocF = M.getFunction("malloc"); - Function *FreeF = M.getFunction("free"); - if (!MallocF || !FreeF) - report_fatal_error( - "malloc and free must be linked into the module if setjmp is used"); - // Register saveSetjmp function FunctionType *SetjmpFTy = SetjmpF->getFunctionType(); SmallVector<Type *, 4> Params = {SetjmpFTy->getParamType(0), diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp index 0020817aee41..ee708d637b25 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Lower @llvm.global_dtors. +/// Lower @llvm.global_dtors. /// /// WebAssembly doesn't have a builtin way to invoke static destructors. 
/// Implement @llvm.global_dtors by creating wrapper functions that are @@ -51,6 +51,9 @@ public: } // End anonymous namespace char LowerGlobalDtors::ID = 0; +INITIALIZE_PASS(LowerGlobalDtors, DEBUG_TYPE, + "Lower @llvm.global_dtors for WebAssembly", false, false) + ModulePass *llvm::createWebAssemblyLowerGlobalDtors() { return new LowerGlobalDtors(); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp index 4a93d4810c7d..d85db14fc679 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains code to lower WebAssembly MachineInstrs to their +/// This file contains code to lower WebAssembly MachineInstrs to their /// corresponding MCInst records. /// //===----------------------------------------------------------------------===// @@ -25,7 +25,6 @@ #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" -#include "llvm/MC/MCSymbolELF.h" #include "llvm/MC/MCSymbolWasm.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -34,11 +33,7 @@ using namespace llvm; MCSymbol * WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { const GlobalValue *Global = MO.getGlobal(); - MCSymbol *Sym = Printer.getSymbol(Global); - if (isa<MCSymbolELF>(Sym)) - return Sym; - - MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym); + MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Printer.getSymbol(Global)); if (const auto *FuncTy = dyn_cast<FunctionType>(Global->getValueType())) { const MachineFunction &MF = *MO.getParent()->getParent()->getParent(); @@ -74,7 +69,7 @@ WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { WasmSym->setReturns(std::move(Returns)); WasmSym->setParams(std::move(Params)); - WasmSym->setIsFunction(true); + WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); } return WasmSym; @@ -83,17 +78,22 @@ WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const { MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol( const MachineOperand &MO) const { const char *Name = MO.getSymbolName(); - MCSymbol *Sym = Printer.GetExternalSymbolSymbol(Name); - if (isa<MCSymbolELF>(Sym)) - return Sym; - - MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym); + MCSymbolWasm *WasmSym = + cast<MCSymbolWasm>(Printer.GetExternalSymbolSymbol(Name)); const WebAssemblySubtarget &Subtarget = Printer.getSubtarget(); // __stack_pointer is a global variable; all other external symbols used by - // CodeGen are functions. - if (strcmp(Name, "__stack_pointer") == 0) + // CodeGen are functions. It's OK to hardcode knowledge of specific symbols + // here; this method is precisely there for fetching the signatures of known + // Clang-provided symbols. + if (strcmp(Name, "__stack_pointer") == 0) { + WasmSym->setType(wasm::WASM_SYMBOL_TYPE_GLOBAL); + WasmSym->setGlobalType(wasm::WasmGlobalType{ + uint8_t(Subtarget.hasAddr64() ? 
wasm::WASM_TYPE_I64 + : wasm::WASM_TYPE_I32), + true}); return WasmSym; + } SmallVector<wasm::ValType, 4> Returns; SmallVector<wasm::ValType, 4> Params; @@ -101,7 +101,7 @@ MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol( WasmSym->setReturns(std::move(Returns)); WasmSym->setParams(std::move(Params)); - WasmSym->setIsFunction(true); + WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); return WasmSym; } @@ -169,35 +169,32 @@ void WebAssemblyMCInstLower::Lower(const MachineInstr *MI, const MCOperandInfo &Info = Desc.OpInfo[i]; if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) { MCSymbol *Sym = Printer.createTempSymbol("typeindex"); - if (!isa<MCSymbolELF>(Sym)) { - SmallVector<wasm::ValType, 4> Returns; - SmallVector<wasm::ValType, 4> Params; - - const MachineRegisterInfo &MRI = - MI->getParent()->getParent()->getRegInfo(); - for (const MachineOperand &MO : MI->defs()) - Returns.push_back(getType(MRI.getRegClass(MO.getReg()))); - for (const MachineOperand &MO : MI->explicit_uses()) - if (MO.isReg()) - Params.push_back(getType(MRI.getRegClass(MO.getReg()))); - - // call_indirect instructions have a callee operand at the end which - // doesn't count as a param. - if (WebAssembly::isCallIndirect(*MI)) - Params.pop_back(); - - MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym); - WasmSym->setReturns(std::move(Returns)); - WasmSym->setParams(std::move(Params)); - WasmSym->setIsFunction(true); - - const MCExpr *Expr = - MCSymbolRefExpr::create(WasmSym, - MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX, - Ctx); - MCOp = MCOperand::createExpr(Expr); - break; - } + + SmallVector<wasm::ValType, 4> Returns; + SmallVector<wasm::ValType, 4> Params; + + const MachineRegisterInfo &MRI = + MI->getParent()->getParent()->getRegInfo(); + for (const MachineOperand &MO : MI->defs()) + Returns.push_back(getType(MRI.getRegClass(MO.getReg()))); + for (const MachineOperand &MO : MI->explicit_uses()) + if (MO.isReg()) + Params.push_back(getType(MRI.getRegClass(MO.getReg()))); + + // call_indirect instructions have a callee operand at the end which + // doesn't count as a param. + if (WebAssembly::isCallIndirect(*MI)) + Params.pop_back(); + + MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym); + WasmSym->setReturns(std::move(Returns)); + WasmSym->setParams(std::move(Params)); + WasmSym->setType(wasm::WASM_SYMBOL_TYPE_FUNCTION); + + const MCExpr *Expr = MCSymbolRefExpr::create( + WasmSym, MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX, Ctx); + MCOp = MCOperand::createExpr(Expr); + break; } } MCOp = MCOperand::createImm(MO.getImm()); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h index d1d2794c3b8f..41b4313bb38c 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the class to lower WebAssembly MachineInstrs to +/// This file declares the class to lower WebAssembly MachineInstrs to /// their corresponding MCInst records. 
/// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp index ccf6a18b32ea..e511e574050f 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements WebAssembly-specific per-machine-function +/// This file implements WebAssembly-specific per-machine-function /// information. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h index 1fcbb7791d4e..a60b10fc5309 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares WebAssembly-specific per-machine-function +/// This file declares WebAssembly-specific per-machine-function /// information. /// //===----------------------------------------------------------------------===// @@ -60,6 +60,8 @@ class WebAssemblyFunctionInfo final : public MachineFunctionInfo { void addResult(MVT VT) { Results.push_back(VT); } const std::vector<MVT> &getResults() const { return Results; } + void clearParamsAndResults() { Params.clear(); Results.clear(); } + void setNumLocals(size_t NumLocals) { Locals.resize(NumLocals, MVT::i32); } void setLocal(size_t i, MVT VT) { Locals[i] = VT; } void addLocal(MVT VT) { Locals.push_back(VT); } @@ -81,25 +83,29 @@ class WebAssemblyFunctionInfo final : public MachineFunctionInfo { void stackifyVReg(unsigned VReg) { assert(MF.getRegInfo().getUniqueVRegDef(VReg)); - if (TargetRegisterInfo::virtReg2Index(VReg) >= VRegStackified.size()) - VRegStackified.resize(TargetRegisterInfo::virtReg2Index(VReg) + 1); - VRegStackified.set(TargetRegisterInfo::virtReg2Index(VReg)); + auto I = TargetRegisterInfo::virtReg2Index(VReg); + if (I >= VRegStackified.size()) + VRegStackified.resize(I + 1); + VRegStackified.set(I); } bool isVRegStackified(unsigned VReg) const { - if (TargetRegisterInfo::virtReg2Index(VReg) >= VRegStackified.size()) + auto I = TargetRegisterInfo::virtReg2Index(VReg); + if (I >= VRegStackified.size()) return false; - return VRegStackified.test(TargetRegisterInfo::virtReg2Index(VReg)); + return VRegStackified.test(I); } void initWARegs(); void setWAReg(unsigned VReg, unsigned WAReg) { assert(WAReg != UnusedReg); - assert(TargetRegisterInfo::virtReg2Index(VReg) < WARegs.size()); - WARegs[TargetRegisterInfo::virtReg2Index(VReg)] = WAReg; + auto I = TargetRegisterInfo::virtReg2Index(VReg); + assert(I < WARegs.size()); + WARegs[I] = WAReg; } - unsigned getWAReg(unsigned Reg) const { - assert(TargetRegisterInfo::virtReg2Index(Reg) < WARegs.size()); - return WARegs[TargetRegisterInfo::virtReg2Index(Reg)]; + unsigned getWAReg(unsigned VReg) const { + auto I = TargetRegisterInfo::virtReg2Index(VReg); + assert(I < WARegs.size()); + return WARegs[I]; } // For a given stackified WAReg, return the id number to print with push/pop. 
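The stackifyVReg/isVRegStackified/setWAReg changes above are a refactor of the same grow-on-demand bit-set idiom. A standalone sketch of that idiom, with std::vector<bool> standing in for llvm::BitVector and a plain dense index in place of TargetRegisterInfo::virtReg2Index:

#include <vector>

// Membership set keyed by a dense index: set() grows the storage as higher
// indices appear, while test() treats anything past the current size as
// absent, so querying a register that was never stackified needs no resize.
class GrowOnDemandBitSet {
  std::vector<bool> Bits;

public:
  void set(unsigned Index) {
    if (Index >= Bits.size())
      Bits.resize(Index + 1, false);
    Bits[Index] = true;
  }
  bool test(unsigned Index) const {
    return Index < Bits.size() && Bits[Index];
  }
};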
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp index ebe97848d461..04ac22a589ea 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp @@ -8,11 +8,11 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Optimize LiveIntervals for use in a post-RA context. +/// Optimize LiveIntervals for use in a post-RA context. // /// LiveIntervals normally runs before register allocation when the code is /// only recently lowered out of SSA form, so it's uncommon for registers to -/// have multiple defs, and then they do, the defs are usually closely related. +/// have multiple defs, and when they do, the defs are usually closely related. /// Later, after coalescing, tail duplication, and other optimizations, it's /// more common to see registers with multiple unrelated defs. This pass /// updates LiveIntervals to distribute the value numbers across separate @@ -58,14 +58,17 @@ public: } // end anonymous namespace char WebAssemblyOptimizeLiveIntervals::ID = 0; +INITIALIZE_PASS(WebAssemblyOptimizeLiveIntervals, DEBUG_TYPE, + "Optimize LiveIntervals for WebAssembly", false, false) + FunctionPass *llvm::createWebAssemblyOptimizeLiveIntervals() { return new WebAssemblyOptimizeLiveIntervals(); } bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n" + "********** Function: " + << MF.getName() << '\n'); MachineRegisterInfo &MRI = MF.getRegInfo(); LiveIntervals &LIS = getAnalysis<LiveIntervals>(); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp index 559165e4c86b..113ee2532bce 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Optimize calls with "returned" attributes for WebAssembly. +/// Optimize calls with "returned" attributes for WebAssembly. /// //===----------------------------------------------------------------------===// @@ -48,6 +48,10 @@ public: } // End anonymous namespace char OptimizeReturned::ID = 0; +INITIALIZE_PASS(OptimizeReturned, DEBUG_TYPE, + "Optimize calls with \"returned\" attributes for WebAssembly", + false, false) + FunctionPass *llvm::createWebAssemblyOptimizeReturned() { return new OptimizeReturned(); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp index d2fbc5a22308..a54484407805 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Late peephole optimizations for WebAssembly. +/// Late peephole optimizations for WebAssembly. 
/// //===----------------------------------------------------------------------===// @@ -50,6 +50,9 @@ public: } // end anonymous namespace char WebAssemblyPeephole::ID = 0; +INITIALIZE_PASS(WebAssemblyPeephole, DEBUG_TYPE, + "WebAssembly peephole optimizations", false, false) + FunctionPass *llvm::createWebAssemblyPeephole() { return new WebAssemblyPeephole(); } @@ -80,18 +83,13 @@ static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB, return false; if (&MBB != &MF.back()) return false; - if (MF.getSubtarget<WebAssemblySubtarget>() - .getTargetTriple().isOSBinFormatELF()) { - if (&MI != &MBB.back()) - return false; - } else { - MachineBasicBlock::iterator End = MBB.end(); - --End; - assert(End->getOpcode() == WebAssembly::END_FUNCTION); - --End; - if (&MI != &*End) - return false; - } + + MachineBasicBlock::iterator End = MBB.end(); + --End; + assert(End->getOpcode() == WebAssembly::END_FUNCTION); + --End; + if (&MI != &*End) + return false; if (FallthroughOpc != WebAssembly::FALLTHROUGH_RETURN_VOID) { // If the operand isn't stackified, insert a COPY to read the operand and @@ -113,7 +111,7 @@ static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB, } bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Peephole **********\n" << "********** Function: " << MF.getName() << '\n'; }); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp index 3a2876bfcde2..e44e7057e233 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief Fix up code to meet LiveInterval's requirements. +/// Fix up code to meet LiveInterval's requirements. /// /// Some CodeGen passes don't preserve LiveInterval's requirements, because /// they run after register allocation and it isn't important. However, @@ -55,6 +55,9 @@ private: } // end anonymous namespace char WebAssemblyPrepareForLiveIntervals::ID = 0; +INITIALIZE_PASS(WebAssemblyPrepareForLiveIntervals, DEBUG_TYPE, + "Fix up code for LiveIntervals", false, false) + FunctionPass *llvm::createWebAssemblyPrepareForLiveIntervals() { return new WebAssemblyPrepareForLiveIntervals(); } @@ -68,7 +71,7 @@ static bool HasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) { } bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Prepare For LiveIntervals **********\n" << "********** Function: " << MF.getName() << '\n'; }); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp index 2ac3a839c3c8..d69a27937105 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a virtual register coloring pass. +/// This file implements a virtual register coloring pass. /// /// WebAssembly doesn't have a fixed number of registers, but it is still /// desirable to minimize the total number of registers used in each function. 
@@ -55,6 +55,9 @@ private: } // end anonymous namespace char WebAssemblyRegColoring::ID = 0; +INITIALIZE_PASS(WebAssemblyRegColoring, DEBUG_TYPE, + "Minimize number of registers used", false, false) + FunctionPass *llvm::createWebAssemblyRegColoring() { return new WebAssemblyRegColoring(); } @@ -71,7 +74,7 @@ static float computeWeight(const MachineRegisterInfo *MRI, } bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Register Coloring **********\n" << "********** Function: " << MF.getName() << '\n'; }); @@ -94,7 +97,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { SmallVector<LiveInterval *, 0> SortedIntervals; SortedIntervals.reserve(NumVRegs); - DEBUG(dbgs() << "Interesting register intervals:\n"); + LLVM_DEBUG(dbgs() << "Interesting register intervals:\n"); for (unsigned i = 0; i < NumVRegs; ++i) { unsigned VReg = TargetRegisterInfo::index2VirtReg(i); if (MFI.isVRegStackified(VReg)) @@ -106,27 +109,27 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { LiveInterval *LI = &Liveness->getInterval(VReg); assert(LI->weight == 0.0f); LI->weight = computeWeight(MRI, MBFI, VReg); - DEBUG(LI->dump()); + LLVM_DEBUG(LI->dump()); SortedIntervals.push_back(LI); } - DEBUG(dbgs() << '\n'); + LLVM_DEBUG(dbgs() << '\n'); // Sort them to put arguments first (since we don't want to rename live-in // registers), by weight next, and then by position. // TODO: Investigate more intelligent sorting heuristics. For starters, we // should try to coalesce adjacent live intervals before non-adjacent ones. - std::sort(SortedIntervals.begin(), SortedIntervals.end(), - [MRI](LiveInterval *LHS, LiveInterval *RHS) { - if (MRI->isLiveIn(LHS->reg) != MRI->isLiveIn(RHS->reg)) - return MRI->isLiveIn(LHS->reg); - if (LHS->weight != RHS->weight) - return LHS->weight > RHS->weight; - if (LHS->empty() || RHS->empty()) - return !LHS->empty() && RHS->empty(); - return *LHS < *RHS; - }); - - DEBUG(dbgs() << "Coloring register intervals:\n"); + llvm::sort(SortedIntervals.begin(), SortedIntervals.end(), + [MRI](LiveInterval *LHS, LiveInterval *RHS) { + if (MRI->isLiveIn(LHS->reg) != MRI->isLiveIn(RHS->reg)) + return MRI->isLiveIn(LHS->reg); + if (LHS->weight != RHS->weight) + return LHS->weight > RHS->weight; + if (LHS->empty() || RHS->empty()) + return !LHS->empty() && RHS->empty(); + return *LHS < *RHS; + }); + + LLVM_DEBUG(dbgs() << "Coloring register intervals:\n"); SmallVector<unsigned, 16> SlotMapping(SortedIntervals.size(), -1u); SmallVector<SmallVector<LiveInterval *, 4>, 16> Assignments( SortedIntervals.size()); @@ -156,9 +159,9 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { Changed |= Old != New; UsedColors.set(Color); Assignments[Color].push_back(LI); - DEBUG(dbgs() << "Assigning vreg" - << TargetRegisterInfo::virtReg2Index(LI->reg) << " to vreg" - << TargetRegisterInfo::virtReg2Index(New) << "\n"); + LLVM_DEBUG( + dbgs() << "Assigning vreg" << TargetRegisterInfo::virtReg2Index(LI->reg) + << " to vreg" << TargetRegisterInfo::virtReg2Index(New) << "\n"); } if (!Changed) return false; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp index 766ab456a8e6..1e2a248f097e 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp @@ -8,7 +8,7 @@ 
//===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a pass which assigns WebAssembly register +/// This file implements a pass which assigns WebAssembly register /// numbers for CodeGen virtual registers. /// //===----------------------------------------------------------------------===// @@ -51,14 +51,18 @@ public: } // end anonymous namespace char WebAssemblyRegNumbering::ID = 0; +INITIALIZE_PASS(WebAssemblyRegNumbering, DEBUG_TYPE, + "Assigns WebAssembly register numbers for virtual registers", + false, false) + FunctionPass *llvm::createWebAssemblyRegNumbering() { return new WebAssemblyRegNumbering(); } bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** Register Numbering **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Register Numbering **********\n" + "********** Function: " + << MF.getName() << '\n'); WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -73,8 +77,8 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) { break; int64_t Imm = MI.getOperand(1).getImm(); - DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg() << " -> WAReg " - << Imm << "\n"); + LLVM_DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg() + << " -> WAReg " << Imm << "\n"); MFI.setWAReg(MI.getOperand(0).getReg(), Imm); } @@ -92,13 +96,13 @@ bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) { continue; // Handle stackified registers. if (MFI.isVRegStackified(VReg)) { - DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " - << (INT32_MIN | NumStackRegs) << "\n"); + LLVM_DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " + << (INT32_MIN | NumStackRegs) << "\n"); MFI.setWAReg(VReg, INT32_MIN | NumStackRegs++); continue; } if (MFI.getWAReg(VReg) == WebAssemblyFunctionInfo::UnusedReg) { - DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n"); + LLVM_DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n"); MFI.setWAReg(VReg, CurReg++); } } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp index a4bb967f36f6..9f5d5bd87831 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a register stacking pass. +/// This file implements a register stacking pass. /// /// This pass reorders instructions to put register uses and defs in an order /// such that they form single-use expression trees. Registers fitting this form @@ -67,6 +67,10 @@ public: } // end anonymous namespace char WebAssemblyRegStackify::ID = 0; +INITIALIZE_PASS(WebAssemblyRegStackify, DEBUG_TYPE, + "Reorder instructions to use the WebAssembly value stack", + false, false) + FunctionPass *llvm::createWebAssemblyRegStackify() { return new WebAssemblyRegStackify(); } @@ -156,10 +160,9 @@ static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read, // and/or uses the stack pointer value. 
static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read, bool &Write, bool &Effects, bool &StackPointer) { - assert(!MI.isPosition()); assert(!MI.isTerminator()); - if (MI.isDebugValue()) + if (MI.isDebugInstr() || MI.isPosition()) return; // Check for loads. @@ -469,7 +472,7 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand& Op, MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI) { - DEBUG(dbgs() << "Move for single use: "; Def->dump()); + LLVM_DEBUG(dbgs() << "Move for single use: "; Def->dump()); MBB.splice(Insert, &MBB, Def); LIS.handleMove(*Def); @@ -496,7 +499,7 @@ static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand& Op, MFI.stackifyVReg(NewReg); - DEBUG(dbgs() << " - Replaced register: "; Def->dump()); + LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump()); } ImposeStackOrdering(Def); @@ -510,8 +513,8 @@ static MachineInstr *RematerializeCheapDef( MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII, const WebAssemblyRegisterInfo *TRI) { - DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump()); - DEBUG(dbgs() << " - for use in "; Op.getParent()->dump()); + LLVM_DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump()); + LLVM_DEBUG(dbgs() << " - for use in "; Op.getParent()->dump()); unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg)); TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI); @@ -522,7 +525,7 @@ static MachineInstr *RematerializeCheapDef( MFI.stackifyVReg(NewReg); ImposeStackOrdering(Clone); - DEBUG(dbgs() << " - Cloned to "; Clone->dump()); + LLVM_DEBUG(dbgs() << " - Cloned to "; Clone->dump()); // Shrink the interval. bool IsDead = MRI.use_empty(Reg); @@ -534,7 +537,7 @@ static MachineInstr *RematerializeCheapDef( // If that was the last use of the original, delete the original. if (IsDead) { - DEBUG(dbgs() << " - Deleting original\n"); + LLVM_DEBUG(dbgs() << " - Deleting original\n"); SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot(); LIS.removePhysRegDefAt(WebAssembly::ARGUMENTS, Idx); LIS.removeInterval(Reg); @@ -569,7 +572,7 @@ static MachineInstr *MoveAndTeeForMultiUse( unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB, MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) { - DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump()); + LLVM_DEBUG(dbgs() << "Move and tee for multi-use:"; Def->dump()); // Move Def into place. 
MBB.splice(Insert, &MBB, Def); @@ -605,8 +608,8 @@ static MachineInstr *MoveAndTeeForMultiUse( ImposeStackOrdering(Def); ImposeStackOrdering(Tee); - DEBUG(dbgs() << " - Replaced register: "; Def->dump()); - DEBUG(dbgs() << " - Tee instruction: "; Tee->dump()); + LLVM_DEBUG(dbgs() << " - Replaced register: "; Def->dump()); + LLVM_DEBUG(dbgs() << " - Tee instruction: "; Tee->dump()); return Def; } @@ -733,9 +736,9 @@ public: } // end anonymous namespace bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { - DEBUG(dbgs() << "********** Register Stackifying **********\n" - "********** Function: " - << MF.getName() << '\n'); + LLVM_DEBUG(dbgs() << "********** Register Stackifying **********\n" + "********** Function: " + << MF.getName() << '\n'); bool Changed = false; MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -746,14 +749,6 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); LiveIntervals &LIS = getAnalysis<LiveIntervals>(); - // Disable the TEE optimization if we aren't doing direct wasm object - // emission, because lowering TEE to TEE_LOCAL is done in the ExplicitLocals - // pass, which is also disabled. - bool UseTee = true; - if (MF.getSubtarget<WebAssemblySubtarget>() - .getTargetTriple().isOSBinFormatELF()) - UseTee = false; - // Walk the instructions from the bottom up. Currently we don't look past // block boundaries, and the blocks aren't ordered so the block visitation // order isn't significant, but we may want to change this in the future. @@ -819,7 +814,7 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { Insert = RematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(), LIS, MFI, MRI, TII, TRI); - } else if (UseTee && CanMove && + } else if (CanMove && OneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) { Insert = MoveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI, TII); @@ -867,7 +862,7 @@ bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) { SmallVector<unsigned, 0> Stack; for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { - if (MI.isDebugValue()) + if (MI.isDebugInstr()) continue; for (MachineOperand &MO : reverse(MI.explicit_operands())) { if (!MO.isReg()) diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp index 5e7ebd19fac7..b6481ac2d4ae 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// TargetRegisterInfo class. 
/// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h index ad1d71eebf22..2a73dfd4b065 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the WebAssembly implementation of the +/// This file contains the WebAssembly implementation of the /// WebAssemblyRegisterInfo class. /// //===----------------------------------------------------------------------===// @@ -45,6 +45,8 @@ public: const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const override; + // This does not apply to wasm. + const uint32_t *getNoPreservedMask() const override { return nullptr; } }; } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td index 90888100be17..29f42b96b249 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file describes the WebAssembly register classes and some nominal +/// This file describes the WebAssembly register classes and some nominal /// physical registers. /// //===----------------------------------------------------------------------===// @@ -34,13 +34,18 @@ def SP32 : WebAssemblyReg<"%SP32">; def SP64 : WebAssemblyReg<"%SP64">; // The register allocation framework requires register classes have at least -// one register, so we define a few for the floating point register classes -// since we otherwise don't need a physical register in those classes. +// one register, so we define a few for the integer / floating point register +// classes since we otherwise don't need a physical register in those classes. +// These are also used as "types" in the generated assembly matcher. +def I32_0 : WebAssemblyReg<"%i32.0">; +def I64_0 : WebAssemblyReg<"%i64.0">; def F32_0 : WebAssemblyReg<"%f32.0">; def F64_0 : WebAssemblyReg<"%f64.0">; def V128_0: WebAssemblyReg<"%v128">; +def EXCEPT_REF_0 : WebAssemblyReg<"%except_ref.0">; + // The value stack "register". This is an opaque entity which serves to order // uses and defs that must remain in LIFO order. 
def VALUE_STACK : WebAssemblyReg<"STACK">; @@ -54,9 +59,10 @@ def ARGUMENTS : WebAssemblyReg<"ARGUMENTS">; // Register classes //===----------------------------------------------------------------------===// -def I32 : WebAssemblyRegClass<[i32], 32, (add FP32, SP32)>; -def I64 : WebAssemblyRegClass<[i64], 64, (add FP64, SP64)>; +def I32 : WebAssemblyRegClass<[i32], 32, (add FP32, SP32, I32_0)>; +def I64 : WebAssemblyRegClass<[i64], 64, (add FP64, SP64, I64_0)>; def F32 : WebAssemblyRegClass<[f32], 32, (add F32_0)>; def F64 : WebAssemblyRegClass<[f64], 64, (add F64_0)>; def V128 : WebAssemblyRegClass<[v4f32, v4i32, v16i8, v8i16], 128, (add V128_0)>; +def EXCEPT_REF : WebAssemblyRegClass<[ExceptRef], 0, (add EXCEPT_REF_0)>; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp index 878ffd08d228..f432b367d156 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements a pass that replaces physical registers with +/// This file implements a pass that replaces physical registers with /// virtual registers. /// /// LLVM expects certain physical registers, such as a stack pointer. However, @@ -53,12 +53,16 @@ private: } // end anonymous namespace char WebAssemblyReplacePhysRegs::ID = 0; +INITIALIZE_PASS(WebAssemblyReplacePhysRegs, DEBUG_TYPE, + "Replace physical registers with virtual registers", + false, false) + FunctionPass *llvm::createWebAssemblyReplacePhysRegs() { return new WebAssemblyReplacePhysRegs(); } bool WebAssemblyReplacePhysRegs::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Replace Physical Registers **********\n" << "********** Function: " << MF.getName() << '\n'; }); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp index f808c063d7e4..fe8a5e4c06f1 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains signature information for runtime libcalls. +/// This file contains signature information for runtime libcalls. /// /// CodeGen uses external symbols, which it refers to by name. The WebAssembly /// target needs type information for all functions. 
This file contains a big @@ -22,6 +22,7 @@ #include "WebAssemblyRuntimeLibcallSignatures.h" #include "WebAssemblySubtarget.h" #include "llvm/CodeGen/RuntimeLibcalls.h" +#include "llvm/Support/ManagedStatic.h" using namespace llvm; @@ -58,13 +59,16 @@ enum RuntimeLibcallSignature { i32_func_f32_f32, i32_func_f64_f64, i32_func_i32_i32, + i32_func_i32_i32_iPTR, i64_func_i64_i64, + i64_func_i64_i64_iPTR, i64_i64_func_f32, i64_i64_func_f64, i16_i16_func_i16_i16, i32_i32_func_i32_i32, i64_i64_func_i64_i64, i64_i64_func_i64_i64_i64_i64, + i64_i64_func_i64_i64_i64_i64_iPTR, i64_i64_i64_i64_func_i64_i64_i64_i64, i64_i64_func_i64_i64_i32, iPTR_func_iPTR_i32_iPTR, @@ -84,918 +88,405 @@ enum RuntimeLibcallSignature { unsupported }; -} // end anonymous namespace - -static const RuntimeLibcallSignature -RuntimeLibcallSignatures[RTLIB::UNKNOWN_LIBCALL] = { -// Integer -/* SHL_I16 */ i16_func_i16_i16, -/* SHL_I32 */ i32_func_i32_i32, -/* SHL_I64 */ i64_func_i64_i64, -/* SHL_I128 */ i64_i64_func_i64_i64_i32, -/* SRL_I16 */ i16_func_i16_i16, -/* SRL_I32 */ i32_func_i32_i32, -/* SRL_I64 */ i64_func_i64_i64, -/* SRL_I128 */ i64_i64_func_i64_i64_i32, -/* SRA_I16 */ i16_func_i16_i16, -/* SRA_I32 */ i32_func_i32_i32, -/* SRA_I64 */ i64_func_i64_i64, -/* SRA_I128 */ i64_i64_func_i64_i64_i32, -/* MUL_I8 */ i8_func_i8_i8, -/* MUL_I16 */ i16_func_i16_i16, -/* MUL_I32 */ i32_func_i32_i32, -/* MUL_I64 */ i64_func_i64_i64, -/* MUL_I128 */ i64_i64_func_i64_i64_i64_i64, -/* MULO_I32 */ i32_func_i32_i32, -/* MULO_I64 */ i64_func_i64_i64, -/* MULO_I128 */ i64_i64_func_i64_i64_i64_i64, -/* SDIV_I8 */ i8_func_i8_i8, -/* SDIV_I16 */ i16_func_i16_i16, -/* SDIV_I32 */ i32_func_i32_i32, -/* SDIV_I64 */ i64_func_i64_i64, -/* SDIV_I128 */ i64_i64_func_i64_i64_i64_i64, -/* UDIV_I8 */ i8_func_i8_i8, -/* UDIV_I16 */ i16_func_i16_i16, -/* UDIV_I32 */ i32_func_i32_i32, -/* UDIV_I64 */ i64_func_i64_i64, -/* UDIV_I128 */ i64_i64_func_i64_i64_i64_i64, -/* SREM_I8 */ i8_func_i8_i8, -/* SREM_I16 */ i16_func_i16_i16, -/* SREM_I32 */ i32_func_i32_i32, -/* SREM_I64 */ i64_func_i64_i64, -/* SREM_I128 */ i64_i64_func_i64_i64_i64_i64, -/* UREM_I8 */ i8_func_i8_i8, -/* UREM_I16 */ i16_func_i16_i16, -/* UREM_I32 */ i32_func_i32_i32, -/* UREM_I64 */ i64_func_i64_i64, -/* UREM_I128 */ i64_i64_func_i64_i64_i64_i64, -/* SDIVREM_I8 */ i8_func_i8_i8, -/* SDIVREM_I16 */ i16_i16_func_i16_i16, -/* SDIVREM_I32 */ i32_i32_func_i32_i32, -/* SDIVREM_I64 */ i64_func_i64_i64, -/* SDIVREM_I128 */ i64_i64_i64_i64_func_i64_i64_i64_i64, -/* UDIVREM_I8 */ i8_func_i8_i8, -/* UDIVREM_I16 */ i16_i16_func_i16_i16, -/* UDIVREM_I32 */ i32_i32_func_i32_i32, -/* UDIVREM_I64 */ i64_i64_func_i64_i64, -/* UDIVREM_I128 */ i64_i64_i64_i64_func_i64_i64_i64_i64, -/* NEG_I32 */ i32_func_i32, -/* NEG_I64 */ i64_func_i64, - -// FLOATING POINT -/* ADD_F32 */ f32_func_f32_f32, -/* ADD_F64 */ f64_func_f64_f64, -/* ADD_F80 */ unsupported, -/* ADD_F128 */ func_iPTR_i64_i64_i64_i64, -/* ADD_PPCF128 */ unsupported, -/* SUB_F32 */ f32_func_f32_f32, -/* SUB_F64 */ f64_func_f64_f64, -/* SUB_F80 */ unsupported, -/* SUB_F128 */ func_iPTR_i64_i64_i64_i64, -/* SUB_PPCF128 */ unsupported, -/* MUL_F32 */ f32_func_f32_f32, -/* MUL_F64 */ f64_func_f64_f64, -/* MUL_F80 */ unsupported, -/* MUL_F128 */ func_iPTR_i64_i64_i64_i64, -/* MUL_PPCF128 */ unsupported, -/* DIV_F32 */ f32_func_f32_f32, -/* DIV_F64 */ f64_func_f64_f64, -/* DIV_F80 */ unsupported, -/* DIV_F128 */ func_iPTR_i64_i64_i64_i64, -/* DIV_PPCF128 */ unsupported, -/* REM_F32 */ f32_func_f32_f32, -/* REM_F64 */ f64_func_f64_f64, -/* REM_F80 */ 
unsupported, -/* REM_F128 */ func_iPTR_i64_i64_i64_i64, -/* REM_PPCF128 */ unsupported, -/* FMA_F32 */ f32_func_f32_f32_f32, -/* FMA_F64 */ f64_func_f64_f64_f64, -/* FMA_F80 */ unsupported, -/* FMA_F128 */ func_iPTR_i64_i64_i64_i64_i64_i64, -/* FMA_PPCF128 */ unsupported, -/* POWI_F32 */ f32_func_f32_i32, -/* POWI_F64 */ f64_func_f64_i32, -/* POWI_F80 */ unsupported, -/* POWI_F128 */ func_iPTR_i64_i64_i64_i64, -/* POWI_PPCF128 */ unsupported, -/* SQRT_F32 */ f32_func_f32, -/* SQRT_F64 */ f64_func_f64, -/* SQRT_F80 */ unsupported, -/* SQRT_F128 */ func_iPTR_i64_i64, -/* SQRT_PPCF128 */ unsupported, -/* LOG_F32 */ f32_func_f32, -/* LOG_F64 */ f64_func_f64, -/* LOG_F80 */ unsupported, -/* LOG_F128 */ func_iPTR_i64_i64, -/* LOG_PPCF128 */ unsupported, -/* LOG2_F32 */ f32_func_f32, -/* LOG2_F64 */ f64_func_f64, -/* LOG2_F80 */ unsupported, -/* LOG2_F128 */ func_iPTR_i64_i64, -/* LOG2_PPCF128 */ unsupported, -/* LOG10_F32 */ f32_func_f32, -/* LOG10_F64 */ f64_func_f64, -/* LOG10_F80 */ unsupported, -/* LOG10_F128 */ func_iPTR_i64_i64, -/* LOG10_PPCF128 */ unsupported, -/* EXP_F32 */ f32_func_f32, -/* EXP_F64 */ f64_func_f64, -/* EXP_F80 */ unsupported, -/* EXP_F128 */ func_iPTR_i64_i64, -/* EXP_PPCF128 */ unsupported, -/* EXP2_F32 */ f32_func_f32, -/* EXP2_F64 */ f64_func_f64, -/* EXP2_F80 */ unsupported, -/* EXP2_F128 */ func_iPTR_i64_i64, -/* EXP2_PPCF128 */ unsupported, -/* SIN_F32 */ f32_func_f32, -/* SIN_F64 */ f64_func_f64, -/* SIN_F80 */ unsupported, -/* SIN_F128 */ func_iPTR_i64_i64, -/* SIN_PPCF128 */ unsupported, -/* COS_F32 */ f32_func_f32, -/* COS_F64 */ f64_func_f64, -/* COS_F80 */ unsupported, -/* COS_F128 */ func_iPTR_i64_i64, -/* COS_PPCF128 */ unsupported, -/* SINCOS_F32 */ func_f32_iPTR_iPTR, -/* SINCOS_F64 */ func_f64_iPTR_iPTR, -/* SINCOS_F80 */ unsupported, -/* SINCOS_F128 */ func_i64_i64_iPTR_iPTR, -/* SINCOS_PPCF128 */ unsupported, -/* SINCOS_STRET_F32 */ unsupported, -/* SINCOS_STRET_F64 */ unsupported, -/* POW_F32 */ f32_func_f32_f32, -/* POW_F64 */ f64_func_f64_f64, -/* POW_F80 */ unsupported, -/* POW_F128 */ func_iPTR_i64_i64_i64_i64, -/* POW_PPCF128 */ unsupported, -/* CEIL_F32 */ f32_func_f32, -/* CEIL_F64 */ f64_func_f64, -/* CEIL_F80 */ unsupported, -/* CEIL_F128 */ func_iPTR_i64_i64, -/* CEIL_PPCF128 */ unsupported, -/* TRUNC_F32 */ f32_func_f32, -/* TRUNC_F64 */ f64_func_f64, -/* TRUNC_F80 */ unsupported, -/* TRUNC_F128 */ func_iPTR_i64_i64, -/* TRUNC_PPCF128 */ unsupported, -/* RINT_F32 */ f32_func_f32, -/* RINT_F64 */ f64_func_f64, -/* RINT_F80 */ unsupported, -/* RINT_F128 */ func_iPTR_i64_i64, -/* RINT_PPCF128 */ unsupported, -/* NEARBYINT_F32 */ f32_func_f32, -/* NEARBYINT_F64 */ f64_func_f64, -/* NEARBYINT_F80 */ unsupported, -/* NEARBYINT_F128 */ func_iPTR_i64_i64, -/* NEARBYINT_PPCF128 */ unsupported, -/* ROUND_F32 */ f32_func_f32, -/* ROUND_F64 */ f64_func_f64, -/* ROUND_F80 */ unsupported, -/* ROUND_F128 */ func_iPTR_i64_i64, -/* ROUND_PPCF128 */ unsupported, -/* FLOOR_F32 */ f32_func_f32, -/* FLOOR_F64 */ f64_func_f64, -/* FLOOR_F80 */ unsupported, -/* FLOOR_F128 */ func_iPTR_i64_i64, -/* FLOOR_PPCF128 */ unsupported, -/* COPYSIGN_F32 */ f32_func_f32_f32, -/* COPYSIGN_F64 */ f64_func_f64_f64, -/* COPYSIGN_F80 */ unsupported, -/* COPYSIGN_F128 */ func_iPTR_i64_i64_i64_i64, -/* COPYSIGN_PPCF128 */ unsupported, -/* FMIN_F32 */ f32_func_f32_f32, -/* FMIN_F64 */ f64_func_f64_f64, -/* FMIN_F80 */ unsupported, -/* FMIN_F128 */ func_iPTR_i64_i64_i64_i64, -/* FMIN_PPCF128 */ unsupported, -/* FMAX_F32 */ f32_func_f32_f32, -/* FMAX_F64 */ f64_func_f64_f64, -/* 
FMAX_F80 */ unsupported, -/* FMAX_F128 */ func_iPTR_i64_i64_i64_i64, -/* FMAX_PPCF128 */ unsupported, - -// CONVERSION -/* FPEXT_F32_PPCF128 */ unsupported, -/* FPEXT_F64_PPCF128 */ unsupported, -/* FPEXT_F64_F128 */ func_iPTR_f64, -/* FPEXT_F32_F128 */ func_iPTR_f32, -/* FPEXT_F32_F64 */ f64_func_f32, -/* FPEXT_F16_F32 */ f32_func_i16, -/* FPROUND_F32_F16 */ i16_func_f32, -/* FPROUND_F64_F16 */ unsupported, -/* FPROUND_F80_F16 */ unsupported, -/* FPROUND_F128_F16 */ unsupported, -/* FPROUND_PPCF128_F16 */ unsupported, -/* FPROUND_F64_F32 */ f32_func_f64, -/* FPROUND_F80_F32 */ unsupported, -/* FPROUND_F128_F32 */ f32_func_i64_i64, -/* FPROUND_PPCF128_F32 */ unsupported, -/* FPROUND_F80_F64 */ unsupported, -/* FPROUND_F128_F64 */ f64_func_i64_i64, -/* FPROUND_PPCF128_F64 */ unsupported, -/* FPTOSINT_F32_I32 */ i32_func_f32, -/* FPTOSINT_F32_I64 */ i64_func_f32, -/* FPTOSINT_F32_I128 */ i64_i64_func_f32, -/* FPTOSINT_F64_I32 */ i32_func_f64, -/* FPTOSINT_F64_I64 */ i64_func_f64, -/* FPTOSINT_F64_I128 */ i64_i64_func_f64, -/* FPTOSINT_F80_I32 */ unsupported, -/* FPTOSINT_F80_I64 */ unsupported, -/* FPTOSINT_F80_I128 */ unsupported, -/* FPTOSINT_F128_I32 */ i32_func_i64_i64, -/* FPTOSINT_F128_I64 */ i64_func_i64_i64, -/* FPTOSINT_F128_I128 */ i64_i64_func_i64_i64, -/* FPTOSINT_PPCF128_I32 */ unsupported, -/* FPTOSINT_PPCF128_I64 */ unsupported, -/* FPTOSINT_PPCF128_I128 */ unsupported, -/* FPTOUINT_F32_I32 */ i32_func_f32, -/* FPTOUINT_F32_I64 */ i64_func_f32, -/* FPTOUINT_F32_I128 */ i64_i64_func_f32, -/* FPTOUINT_F64_I32 */ i32_func_f64, -/* FPTOUINT_F64_I64 */ i64_func_f64, -/* FPTOUINT_F64_I128 */ i64_i64_func_f64, -/* FPTOUINT_F80_I32 */ unsupported, -/* FPTOUINT_F80_I64 */ unsupported, -/* FPTOUINT_F80_I128 */ unsupported, -/* FPTOUINT_F128_I32 */ i32_func_i64_i64, -/* FPTOUINT_F128_I64 */ i64_func_i64_i64, -/* FPTOUINT_F128_I128 */ i64_i64_func_i64_i64, -/* FPTOUINT_PPCF128_I32 */ unsupported, -/* FPTOUINT_PPCF128_I64 */ unsupported, -/* FPTOUINT_PPCF128_I128 */ unsupported, -/* SINTTOFP_I32_F32 */ f32_func_i32, -/* SINTTOFP_I32_F64 */ f64_func_i32, -/* SINTTOFP_I32_F80 */ unsupported, -/* SINTTOFP_I32_F128 */ func_iPTR_i32, -/* SINTTOFP_I32_PPCF128 */ unsupported, -/* SINTTOFP_I64_F32 */ f32_func_i64, -/* SINTTOFP_I64_F64 */ f64_func_i64, -/* SINTTOFP_I64_F80 */ unsupported, -/* SINTTOFP_I64_F128 */ func_iPTR_i64, -/* SINTTOFP_I64_PPCF128 */ unsupported, -/* SINTTOFP_I128_F32 */ f32_func_i64_i64, -/* SINTTOFP_I128_F64 */ f64_func_i64_i64, -/* SINTTOFP_I128_F80 */ unsupported, -/* SINTTOFP_I128_F128 */ func_iPTR_i64_i64, -/* SINTTOFP_I128_PPCF128 */ unsupported, -/* UINTTOFP_I32_F32 */ f32_func_i32, -/* UINTTOFP_I32_F64 */ f64_func_i64, -/* UINTTOFP_I32_F80 */ unsupported, -/* UINTTOFP_I32_F128 */ func_iPTR_i32, -/* UINTTOFP_I32_PPCF128 */ unsupported, -/* UINTTOFP_I64_F32 */ f32_func_i64, -/* UINTTOFP_I64_F64 */ f64_func_i64, -/* UINTTOFP_I64_F80 */ unsupported, -/* UINTTOFP_I64_F128 */ func_iPTR_i64, -/* UINTTOFP_I64_PPCF128 */ unsupported, -/* UINTTOFP_I128_F32 */ f32_func_i64_i64, -/* UINTTOFP_I128_F64 */ f64_func_i64_i64, -/* UINTTOFP_I128_F80 */ unsupported, -/* UINTTOFP_I128_F128 */ func_iPTR_i64_i64, -/* UINTTOFP_I128_PPCF128 */ unsupported, - -// COMPARISON -/* OEQ_F32 */ i32_func_f32_f32, -/* OEQ_F64 */ i32_func_f64_f64, -/* OEQ_F128 */ i32_func_i64_i64_i64_i64, -/* OEQ_PPCF128 */ unsupported, -/* UNE_F32 */ i32_func_f32_f32, -/* UNE_F64 */ i32_func_f64_f64, -/* UNE_F128 */ i32_func_i64_i64_i64_i64, -/* UNE_PPCF128 */ unsupported, -/* OGE_F32 */ i32_func_f32_f32, -/* OGE_F64 
*/ i32_func_f64_f64, -/* OGE_F128 */ i32_func_i64_i64_i64_i64, -/* OGE_PPCF128 */ unsupported, -/* OLT_F32 */ i32_func_f32_f32, -/* OLT_F64 */ i32_func_f64_f64, -/* OLT_F128 */ i32_func_i64_i64_i64_i64, -/* OLT_PPCF128 */ unsupported, -/* OLE_F32 */ i32_func_f32_f32, -/* OLE_F64 */ i32_func_f64_f64, -/* OLE_F128 */ i32_func_i64_i64_i64_i64, -/* OLE_PPCF128 */ unsupported, -/* OGT_F32 */ i32_func_f32_f32, -/* OGT_F64 */ i32_func_f64_f64, -/* OGT_F128 */ i32_func_i64_i64_i64_i64, -/* OGT_PPCF128 */ unsupported, -/* UO_F32 */ i32_func_f32_f32, -/* UO_F64 */ i32_func_f64_f64, -/* UO_F128 */ i32_func_i64_i64_i64_i64, -/* UO_PPCF128 */ unsupported, -/* O_F32 */ i32_func_f32_f32, -/* O_F64 */ i32_func_f64_f64, -/* O_F128 */ i32_func_i64_i64_i64_i64, -/* O_PPCF128 */ unsupported, - -// MEMORY -/* MEMCPY */ iPTR_func_iPTR_iPTR_iPTR, -/* MEMMOVE */ iPTR_func_iPTR_iPTR_iPTR, -/* MEMSET */ iPTR_func_iPTR_i32_iPTR, -/* BZERO */ unsupported, - -// ELEMENT-WISE ATOMIC MEMORY -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported, - -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported, - -// EXCEPTION HANDLING -/* UNWIND_RESUME */ unsupported, - -// Note: there's two sets of atomics libcalls; see -// <http://llvm.org/docs/Atomics.html> for more info on the -// difference between them. - -// Atomic '__sync_*' libcalls. 
-/* SYNC_VAL_COMPARE_AND_SWAP_1 */ unsupported, -/* SYNC_VAL_COMPARE_AND_SWAP_2 */ unsupported, -/* SYNC_VAL_COMPARE_AND_SWAP_4 */ unsupported, -/* SYNC_VAL_COMPARE_AND_SWAP_8 */ unsupported, -/* SYNC_VAL_COMPARE_AND_SWAP_16 */ unsupported, -/* SYNC_LOCK_TEST_AND_SET_1 */ unsupported, -/* SYNC_LOCK_TEST_AND_SET_2 */ unsupported, -/* SYNC_LOCK_TEST_AND_SET_4 */ unsupported, -/* SYNC_LOCK_TEST_AND_SET_8 */ unsupported, -/* SYNC_LOCK_TEST_AND_SET_16 */ unsupported, -/* SYNC_FETCH_AND_ADD_1 */ unsupported, -/* SYNC_FETCH_AND_ADD_2 */ unsupported, -/* SYNC_FETCH_AND_ADD_4 */ unsupported, -/* SYNC_FETCH_AND_ADD_8 */ unsupported, -/* SYNC_FETCH_AND_ADD_16 */ unsupported, -/* SYNC_FETCH_AND_SUB_1 */ unsupported, -/* SYNC_FETCH_AND_SUB_2 */ unsupported, -/* SYNC_FETCH_AND_SUB_4 */ unsupported, -/* SYNC_FETCH_AND_SUB_8 */ unsupported, -/* SYNC_FETCH_AND_SUB_16 */ unsupported, -/* SYNC_FETCH_AND_AND_1 */ unsupported, -/* SYNC_FETCH_AND_AND_2 */ unsupported, -/* SYNC_FETCH_AND_AND_4 */ unsupported, -/* SYNC_FETCH_AND_AND_8 */ unsupported, -/* SYNC_FETCH_AND_AND_16 */ unsupported, -/* SYNC_FETCH_AND_OR_1 */ unsupported, -/* SYNC_FETCH_AND_OR_2 */ unsupported, -/* SYNC_FETCH_AND_OR_4 */ unsupported, -/* SYNC_FETCH_AND_OR_8 */ unsupported, -/* SYNC_FETCH_AND_OR_16 */ unsupported, -/* SYNC_FETCH_AND_XOR_1 */ unsupported, -/* SYNC_FETCH_AND_XOR_2 */ unsupported, -/* SYNC_FETCH_AND_XOR_4 */ unsupported, -/* SYNC_FETCH_AND_XOR_8 */ unsupported, -/* SYNC_FETCH_AND_XOR_16 */ unsupported, -/* SYNC_FETCH_AND_NAND_1 */ unsupported, -/* SYNC_FETCH_AND_NAND_2 */ unsupported, -/* SYNC_FETCH_AND_NAND_4 */ unsupported, -/* SYNC_FETCH_AND_NAND_8 */ unsupported, -/* SYNC_FETCH_AND_NAND_16 */ unsupported, -/* SYNC_FETCH_AND_MAX_1 */ unsupported, -/* SYNC_FETCH_AND_MAX_2 */ unsupported, -/* SYNC_FETCH_AND_MAX_4 */ unsupported, -/* SYNC_FETCH_AND_MAX_8 */ unsupported, -/* SYNC_FETCH_AND_MAX_16 */ unsupported, -/* SYNC_FETCH_AND_UMAX_1 */ unsupported, -/* SYNC_FETCH_AND_UMAX_2 */ unsupported, -/* SYNC_FETCH_AND_UMAX_4 */ unsupported, -/* SYNC_FETCH_AND_UMAX_8 */ unsupported, -/* SYNC_FETCH_AND_UMAX_16 */ unsupported, -/* SYNC_FETCH_AND_MIN_1 */ unsupported, -/* SYNC_FETCH_AND_MIN_2 */ unsupported, -/* SYNC_FETCH_AND_MIN_4 */ unsupported, -/* SYNC_FETCH_AND_MIN_8 */ unsupported, -/* SYNC_FETCH_AND_MIN_16 */ unsupported, -/* SYNC_FETCH_AND_UMIN_1 */ unsupported, -/* SYNC_FETCH_AND_UMIN_2 */ unsupported, -/* SYNC_FETCH_AND_UMIN_4 */ unsupported, -/* SYNC_FETCH_AND_UMIN_8 */ unsupported, -/* SYNC_FETCH_AND_UMIN_16 */ unsupported, - -// Atomic '__atomic_*' libcalls. 
-/* ATOMIC_LOAD */ unsupported, -/* ATOMIC_LOAD_1 */ unsupported, -/* ATOMIC_LOAD_2 */ unsupported, -/* ATOMIC_LOAD_4 */ unsupported, -/* ATOMIC_LOAD_8 */ unsupported, -/* ATOMIC_LOAD_16 */ unsupported, - -/* ATOMIC_STORE */ unsupported, -/* ATOMIC_STORE_1 */ unsupported, -/* ATOMIC_STORE_2 */ unsupported, -/* ATOMIC_STORE_4 */ unsupported, -/* ATOMIC_STORE_8 */ unsupported, -/* ATOMIC_STORE_16 */ unsupported, - -/* ATOMIC_EXCHANGE */ unsupported, -/* ATOMIC_EXCHANGE_1 */ unsupported, -/* ATOMIC_EXCHANGE_2 */ unsupported, -/* ATOMIC_EXCHANGE_4 */ unsupported, -/* ATOMIC_EXCHANGE_8 */ unsupported, -/* ATOMIC_EXCHANGE_16 */ unsupported, - -/* ATOMIC_COMPARE_EXCHANGE */ unsupported, -/* ATOMIC_COMPARE_EXCHANGE_1 */ unsupported, -/* ATOMIC_COMPARE_EXCHANGE_2 */ unsupported, -/* ATOMIC_COMPARE_EXCHANGE_4 */ unsupported, -/* ATOMIC_COMPARE_EXCHANGE_8 */ unsupported, -/* ATOMIC_COMPARE_EXCHANGE_16 */ unsupported, - -/* ATOMIC_FETCH_ADD_1 */ unsupported, -/* ATOMIC_FETCH_ADD_2 */ unsupported, -/* ATOMIC_FETCH_ADD_4 */ unsupported, -/* ATOMIC_FETCH_ADD_8 */ unsupported, -/* ATOMIC_FETCH_ADD_16 */ unsupported, - -/* ATOMIC_FETCH_SUB_1 */ unsupported, -/* ATOMIC_FETCH_SUB_2 */ unsupported, -/* ATOMIC_FETCH_SUB_4 */ unsupported, -/* ATOMIC_FETCH_SUB_8 */ unsupported, -/* ATOMIC_FETCH_SUB_16 */ unsupported, - -/* ATOMIC_FETCH_AND_1 */ unsupported, -/* ATOMIC_FETCH_AND_2 */ unsupported, -/* ATOMIC_FETCH_AND_4 */ unsupported, -/* ATOMIC_FETCH_AND_8 */ unsupported, -/* ATOMIC_FETCH_AND_16 */ unsupported, - -/* ATOMIC_FETCH_OR_1 */ unsupported, -/* ATOMIC_FETCH_OR_2 */ unsupported, -/* ATOMIC_FETCH_OR_4 */ unsupported, -/* ATOMIC_FETCH_OR_8 */ unsupported, -/* ATOMIC_FETCH_OR_16 */ unsupported, - -/* ATOMIC_FETCH_XOR_1 */ unsupported, -/* ATOMIC_FETCH_XOR_2 */ unsupported, -/* ATOMIC_FETCH_XOR_4 */ unsupported, -/* ATOMIC_FETCH_XOR_8 */ unsupported, -/* ATOMIC_FETCH_XOR_16 */ unsupported, - -/* ATOMIC_FETCH_NAND_1 */ unsupported, -/* ATOMIC_FETCH_NAND_2 */ unsupported, -/* ATOMIC_FETCH_NAND_4 */ unsupported, -/* ATOMIC_FETCH_NAND_8 */ unsupported, -/* ATOMIC_FETCH_NAND_16 */ unsupported, - -// Stack Protector Fail. -/* STACKPROTECTOR_CHECK_FAIL */ func, - -// Deoptimization. -/* DEOPTIMIZE */ unsupported, +struct RuntimeLibcallSignatureTable { + std::vector<RuntimeLibcallSignature> Table; + + // Any newly-added libcalls will be unsupported by default. 
+ RuntimeLibcallSignatureTable() : Table(RTLIB::UNKNOWN_LIBCALL, unsupported) { + // Integer + Table[RTLIB::SHL_I16] = i16_func_i16_i16; + Table[RTLIB::SHL_I32] = i32_func_i32_i32; + Table[RTLIB::SHL_I64] = i64_func_i64_i64; + Table[RTLIB::SHL_I128] = i64_i64_func_i64_i64_i32; + Table[RTLIB::SRL_I16] = i16_func_i16_i16; + Table[RTLIB::SRL_I32] = i32_func_i32_i32; + Table[RTLIB::SRL_I64] = i64_func_i64_i64; + Table[RTLIB::SRL_I128] = i64_i64_func_i64_i64_i32; + Table[RTLIB::SRA_I16] = i16_func_i16_i16; + Table[RTLIB::SRA_I32] = i32_func_i32_i32; + Table[RTLIB::SRA_I64] = i64_func_i64_i64; + Table[RTLIB::SRA_I128] = i64_i64_func_i64_i64_i32; + Table[RTLIB::MUL_I8] = i8_func_i8_i8; + Table[RTLIB::MUL_I16] = i16_func_i16_i16; + Table[RTLIB::MUL_I32] = i32_func_i32_i32; + Table[RTLIB::MUL_I64] = i64_func_i64_i64; + Table[RTLIB::MUL_I128] = i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::MULO_I32] = i32_func_i32_i32_iPTR; + Table[RTLIB::MULO_I64] = i64_func_i64_i64_iPTR; + Table[RTLIB::MULO_I128] = i64_i64_func_i64_i64_i64_i64_iPTR; + Table[RTLIB::SDIV_I8] = i8_func_i8_i8; + Table[RTLIB::SDIV_I16] = i16_func_i16_i16; + Table[RTLIB::SDIV_I32] = i32_func_i32_i32; + Table[RTLIB::SDIV_I64] = i64_func_i64_i64; + Table[RTLIB::SDIV_I128] = i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::UDIV_I8] = i8_func_i8_i8; + Table[RTLIB::UDIV_I16] = i16_func_i16_i16; + Table[RTLIB::UDIV_I32] = i32_func_i32_i32; + Table[RTLIB::UDIV_I64] = i64_func_i64_i64; + Table[RTLIB::UDIV_I128] = i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::SREM_I8] = i8_func_i8_i8; + Table[RTLIB::SREM_I16] = i16_func_i16_i16; + Table[RTLIB::SREM_I32] = i32_func_i32_i32; + Table[RTLIB::SREM_I64] = i64_func_i64_i64; + Table[RTLIB::SREM_I128] = i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::UREM_I8] = i8_func_i8_i8; + Table[RTLIB::UREM_I16] = i16_func_i16_i16; + Table[RTLIB::UREM_I32] = i32_func_i32_i32; + Table[RTLIB::UREM_I64] = i64_func_i64_i64; + Table[RTLIB::UREM_I128] = i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::SDIVREM_I8] = i8_func_i8_i8; + Table[RTLIB::SDIVREM_I16] = i16_i16_func_i16_i16; + Table[RTLIB::SDIVREM_I32] = i32_i32_func_i32_i32; + Table[RTLIB::SDIVREM_I64] = i64_func_i64_i64; + Table[RTLIB::SDIVREM_I128] = i64_i64_i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::UDIVREM_I8] = i8_func_i8_i8; + Table[RTLIB::UDIVREM_I16] = i16_i16_func_i16_i16; + Table[RTLIB::UDIVREM_I32] = i32_i32_func_i32_i32; + Table[RTLIB::UDIVREM_I64] = i64_i64_func_i64_i64; + Table[RTLIB::UDIVREM_I128] = i64_i64_i64_i64_func_i64_i64_i64_i64; + Table[RTLIB::NEG_I32] = i32_func_i32; + Table[RTLIB::NEG_I64] = i64_func_i64; + + // Floating-point. + // All F80 and PPCF128 routines are unsupported. 
+ Table[RTLIB::ADD_F32] = f32_func_f32_f32; + Table[RTLIB::ADD_F64] = f64_func_f64_f64; + Table[RTLIB::ADD_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::SUB_F32] = f32_func_f32_f32; + Table[RTLIB::SUB_F64] = f64_func_f64_f64; + Table[RTLIB::SUB_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::MUL_F32] = f32_func_f32_f32; + Table[RTLIB::MUL_F64] = f64_func_f64_f64; + Table[RTLIB::MUL_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::DIV_F32] = f32_func_f32_f32; + Table[RTLIB::DIV_F64] = f64_func_f64_f64; + Table[RTLIB::DIV_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::REM_F32] = f32_func_f32_f32; + Table[RTLIB::REM_F64] = f64_func_f64_f64; + Table[RTLIB::REM_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::FMA_F32] = f32_func_f32_f32_f32; + Table[RTLIB::FMA_F64] = f64_func_f64_f64_f64; + Table[RTLIB::FMA_F128] = func_iPTR_i64_i64_i64_i64_i64_i64; + Table[RTLIB::POWI_F32] = f32_func_f32_i32; + Table[RTLIB::POWI_F64] = f64_func_f64_i32; + Table[RTLIB::POWI_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::SQRT_F32] = f32_func_f32; + Table[RTLIB::SQRT_F64] = f64_func_f64; + Table[RTLIB::SQRT_F128] = func_iPTR_i64_i64; + Table[RTLIB::LOG_F32] = f32_func_f32; + Table[RTLIB::LOG_F64] = f64_func_f64; + Table[RTLIB::LOG_F128] = func_iPTR_i64_i64; + Table[RTLIB::LOG2_F32] = f32_func_f32; + Table[RTLIB::LOG2_F64] = f64_func_f64; + Table[RTLIB::LOG2_F128] = func_iPTR_i64_i64; + Table[RTLIB::LOG10_F32] = f32_func_f32; + Table[RTLIB::LOG10_F64] = f64_func_f64; + Table[RTLIB::LOG10_F128] = func_iPTR_i64_i64; + Table[RTLIB::EXP_F32] = f32_func_f32; + Table[RTLIB::EXP_F64] = f64_func_f64; + Table[RTLIB::EXP_F128] = func_iPTR_i64_i64; + Table[RTLIB::EXP2_F32] = f32_func_f32; + Table[RTLIB::EXP2_F64] = f64_func_f64; + Table[RTLIB::EXP2_F128] = func_iPTR_i64_i64; + Table[RTLIB::SIN_F32] = f32_func_f32; + Table[RTLIB::SIN_F64] = f64_func_f64; + Table[RTLIB::SIN_F128] = func_iPTR_i64_i64; + Table[RTLIB::COS_F32] = f32_func_f32; + Table[RTLIB::COS_F64] = f64_func_f64; + Table[RTLIB::COS_F128] = func_iPTR_i64_i64; + Table[RTLIB::SINCOS_F32] = func_f32_iPTR_iPTR; + Table[RTLIB::SINCOS_F64] = func_f64_iPTR_iPTR; + Table[RTLIB::SINCOS_F128] = func_i64_i64_iPTR_iPTR; + Table[RTLIB::POW_F32] = f32_func_f32_f32; + Table[RTLIB::POW_F64] = f64_func_f64_f64; + Table[RTLIB::POW_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::CEIL_F32] = f32_func_f32; + Table[RTLIB::CEIL_F64] = f64_func_f64; + Table[RTLIB::CEIL_F128] = func_iPTR_i64_i64; + Table[RTLIB::TRUNC_F32] = f32_func_f32; + Table[RTLIB::TRUNC_F64] = f64_func_f64; + Table[RTLIB::TRUNC_F128] = func_iPTR_i64_i64; + Table[RTLIB::RINT_F32] = f32_func_f32; + Table[RTLIB::RINT_F64] = f64_func_f64; + Table[RTLIB::RINT_F128] = func_iPTR_i64_i64; + Table[RTLIB::NEARBYINT_F32] = f32_func_f32; + Table[RTLIB::NEARBYINT_F64] = f64_func_f64; + Table[RTLIB::NEARBYINT_F128] = func_iPTR_i64_i64; + Table[RTLIB::ROUND_F32] = f32_func_f32; + Table[RTLIB::ROUND_F64] = f64_func_f64; + Table[RTLIB::ROUND_F128] = func_iPTR_i64_i64; + Table[RTLIB::FLOOR_F32] = f32_func_f32; + Table[RTLIB::FLOOR_F64] = f64_func_f64; + Table[RTLIB::FLOOR_F128] = func_iPTR_i64_i64; + Table[RTLIB::COPYSIGN_F32] = f32_func_f32_f32; + Table[RTLIB::COPYSIGN_F64] = f64_func_f64_f64; + Table[RTLIB::COPYSIGN_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::FMIN_F32] = f32_func_f32_f32; + Table[RTLIB::FMIN_F64] = f64_func_f64_f64; + Table[RTLIB::FMIN_F128] = func_iPTR_i64_i64_i64_i64; + Table[RTLIB::FMAX_F32] = f32_func_f32_f32; + Table[RTLIB::FMAX_F64] = f64_func_f64_f64; + Table[RTLIB::FMAX_F128] = 
func_iPTR_i64_i64_i64_i64; + + // Conversion + // All F80 and PPCF128 routines are unspported. + Table[RTLIB::FPEXT_F64_F128] = func_iPTR_f64; + Table[RTLIB::FPEXT_F32_F128] = func_iPTR_f32; + Table[RTLIB::FPEXT_F32_F64] = f64_func_f32; + Table[RTLIB::FPEXT_F16_F32] = f32_func_i16; + Table[RTLIB::FPROUND_F32_F16] = i16_func_f32; + Table[RTLIB::FPROUND_F64_F32] = f32_func_f64; + Table[RTLIB::FPROUND_F128_F32] = f32_func_i64_i64; + Table[RTLIB::FPROUND_F128_F64] = f64_func_i64_i64; + Table[RTLIB::FPTOSINT_F32_I32] = i32_func_f32; + Table[RTLIB::FPTOSINT_F32_I64] = i64_func_f32; + Table[RTLIB::FPTOSINT_F32_I128] = i64_i64_func_f32; + Table[RTLIB::FPTOSINT_F64_I32] = i32_func_f64; + Table[RTLIB::FPTOSINT_F64_I64] = i64_func_f64; + Table[RTLIB::FPTOSINT_F64_I128] = i64_i64_func_f64; + Table[RTLIB::FPTOSINT_F128_I32] = i32_func_i64_i64; + Table[RTLIB::FPTOSINT_F128_I64] = i64_func_i64_i64; + Table[RTLIB::FPTOSINT_F128_I128] = i64_i64_func_i64_i64; + Table[RTLIB::FPTOUINT_F32_I32] = i32_func_f32; + Table[RTLIB::FPTOUINT_F32_I64] = i64_func_f32; + Table[RTLIB::FPTOUINT_F32_I128] = i64_i64_func_f32; + Table[RTLIB::FPTOUINT_F64_I32] = i32_func_f64; + Table[RTLIB::FPTOUINT_F64_I64] = i64_func_f64; + Table[RTLIB::FPTOUINT_F64_I128] = i64_i64_func_f64; + Table[RTLIB::FPTOUINT_F128_I32] = i32_func_i64_i64; + Table[RTLIB::FPTOUINT_F128_I64] = i64_func_i64_i64; + Table[RTLIB::FPTOUINT_F128_I128] = i64_i64_func_i64_i64; + Table[RTLIB::SINTTOFP_I32_F32] = f32_func_i32; + Table[RTLIB::SINTTOFP_I32_F64] = f64_func_i32; + Table[RTLIB::SINTTOFP_I32_F128] = func_iPTR_i32; + Table[RTLIB::SINTTOFP_I64_F32] = f32_func_i64; + Table[RTLIB::SINTTOFP_I64_F64] = f64_func_i64; + Table[RTLIB::SINTTOFP_I64_F128] = func_iPTR_i64; + Table[RTLIB::SINTTOFP_I128_F32] = f32_func_i64_i64; + Table[RTLIB::SINTTOFP_I128_F64] = f64_func_i64_i64; + Table[RTLIB::SINTTOFP_I128_F128] = func_iPTR_i64_i64; + Table[RTLIB::UINTTOFP_I32_F32] = f32_func_i32; + Table[RTLIB::UINTTOFP_I32_F64] = f64_func_i64; + Table[RTLIB::UINTTOFP_I32_F128] = func_iPTR_i32; + Table[RTLIB::UINTTOFP_I64_F32] = f32_func_i64; + Table[RTLIB::UINTTOFP_I64_F64] = f64_func_i64; + Table[RTLIB::UINTTOFP_I64_F128] = func_iPTR_i64; + Table[RTLIB::UINTTOFP_I128_F32] = f32_func_i64_i64; + Table[RTLIB::UINTTOFP_I128_F64] = f64_func_i64_i64; + Table[RTLIB::UINTTOFP_I128_F128] = func_iPTR_i64_i64; + + // Comparison + // ALl F80 and PPCF128 routines are unsupported. + Table[RTLIB::OEQ_F32] = i32_func_f32_f32; + Table[RTLIB::OEQ_F64] = i32_func_f64_f64; + Table[RTLIB::OEQ_F128] = i32_func_i64_i64_i64_i64; + Table[RTLIB::UNE_F32] = i32_func_f32_f32; + Table[RTLIB::UNE_F64] = i32_func_f64_f64; + Table[RTLIB::UNE_F128] = i32_func_i64_i64_i64_i64; + Table[RTLIB::OGE_F32] = i32_func_f32_f32; + Table[RTLIB::OGE_F64] = i32_func_f64_f64; + Table[RTLIB::OGE_F128] = i32_func_i64_i64_i64_i64; + Table[RTLIB::OLT_F32] = i32_func_f32_f32; + Table[RTLIB::OLT_F64] = i32_func_f64_f64; + Table[RTLIB::OLT_F128] = i32_func_i64_i64_i64_i64; + Table[RTLIB::OLE_F32] = i32_func_f32_f32; + Table[RTLIB::OLE_F64] = i32_func_f64_f64; + Table[RTLIB::OLE_F128] = i32_func_i64_i64_i64_i64; + Table[RTLIB::OGT_F32] = i32_func_f32_f32; + Table[RTLIB::OGT_F64] = i32_func_f64_f64; + Table[RTLIB::OGT_F128] = i32_func_i64_i64_i64_i64; + Table[RTLIB::UO_F32] = i32_func_f32_f32; + Table[RTLIB::UO_F64] = i32_func_f64_f64; + Table[RTLIB::UO_F128] = i32_func_i64_i64_i64_i64; + // O_FXX has the weird property that it uses the same libcall name as UO_FXX + // This breaks our name-based lookup. 
Fortunately only the UO family of + // libcalls appears to be actually used. + Table[RTLIB::O_F32] = unsupported; + Table[RTLIB::O_F64] = unsupported; + Table[RTLIB::O_F128] = unsupported; + + // Memory + Table[RTLIB::MEMCPY] = iPTR_func_iPTR_iPTR_iPTR; + Table[RTLIB::MEMSET] = iPTR_func_iPTR_i32_iPTR; + Table[RTLIB::MEMMOVE] = iPTR_func_iPTR_iPTR_iPTR; + + // Element-wise Atomic memory + // TODO: Fix these when we implement atomic support + Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_1] = unsupported; + Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_2] = unsupported; + Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_4] = unsupported; + Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_8] = unsupported; + Table[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_16] = unsupported; + Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1] = unsupported; + Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2] = unsupported; + Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4] = unsupported; + Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8] = unsupported; + Table[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16] = unsupported; + + Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_1] = unsupported; + Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_2] = unsupported; + Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_4] = unsupported; + Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_8] = unsupported; + Table[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_16] = unsupported; + + // Atomic '__sync_*' libcalls. + // TODO: Fix these when we implement atomic support + Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = unsupported; + Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = unsupported; + Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = unsupported; + Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = unsupported; + Table[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = unsupported; + Table[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = unsupported; + Table[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = unsupported; + Table[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = unsupported; + Table[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = unsupported; + Table[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_ADD_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_ADD_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_ADD_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_ADD_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_ADD_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_SUB_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_SUB_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_SUB_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_SUB_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_SUB_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_AND_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_AND_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_AND_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_AND_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_AND_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_OR_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_OR_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_OR_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_OR_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_OR_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_XOR_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_XOR_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_XOR_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_XOR_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_XOR_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_NAND_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_NAND_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_NAND_4] = unsupported; 
+ Table[RTLIB::SYNC_FETCH_AND_NAND_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_NAND_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MAX_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MAX_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MAX_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MAX_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MAX_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMAX_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMAX_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMAX_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMAX_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMAX_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MIN_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MIN_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MIN_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MIN_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_MIN_16] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMIN_1] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMIN_2] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMIN_4] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMIN_8] = unsupported; + Table[RTLIB::SYNC_FETCH_AND_UMIN_16] = unsupported; + + // Atomic '__atomic_*' libcalls. + // TODO: Fix these when we implement atomic support + Table[RTLIB::ATOMIC_LOAD] = unsupported; + Table[RTLIB::ATOMIC_LOAD_1] = unsupported; + Table[RTLIB::ATOMIC_LOAD_2] = unsupported; + Table[RTLIB::ATOMIC_LOAD_4] = unsupported; + Table[RTLIB::ATOMIC_LOAD_8] = unsupported; + Table[RTLIB::ATOMIC_LOAD_16] = unsupported; + + Table[RTLIB::ATOMIC_STORE] = unsupported; + Table[RTLIB::ATOMIC_STORE_1] = unsupported; + Table[RTLIB::ATOMIC_STORE_2] = unsupported; + Table[RTLIB::ATOMIC_STORE_4] = unsupported; + Table[RTLIB::ATOMIC_STORE_8] = unsupported; + Table[RTLIB::ATOMIC_STORE_16] = unsupported; + + Table[RTLIB::ATOMIC_EXCHANGE] = unsupported; + Table[RTLIB::ATOMIC_EXCHANGE_1] = unsupported; + Table[RTLIB::ATOMIC_EXCHANGE_2] = unsupported; + Table[RTLIB::ATOMIC_EXCHANGE_4] = unsupported; + Table[RTLIB::ATOMIC_EXCHANGE_8] = unsupported; + Table[RTLIB::ATOMIC_EXCHANGE_16] = unsupported; + + Table[RTLIB::ATOMIC_COMPARE_EXCHANGE] = unsupported; + Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_1] = unsupported; + Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_2] = unsupported; + Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_4] = unsupported; + Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_8] = unsupported; + Table[RTLIB::ATOMIC_COMPARE_EXCHANGE_16] = unsupported; + + Table[RTLIB::ATOMIC_FETCH_ADD_1] = unsupported; + Table[RTLIB::ATOMIC_FETCH_ADD_2] = unsupported; + Table[RTLIB::ATOMIC_FETCH_ADD_4] = unsupported; + Table[RTLIB::ATOMIC_FETCH_ADD_8] = unsupported; + Table[RTLIB::ATOMIC_FETCH_ADD_16] = unsupported; + + Table[RTLIB::ATOMIC_FETCH_SUB_1] = unsupported; + Table[RTLIB::ATOMIC_FETCH_SUB_2] = unsupported; + Table[RTLIB::ATOMIC_FETCH_SUB_4] = unsupported; + Table[RTLIB::ATOMIC_FETCH_SUB_8] = unsupported; + Table[RTLIB::ATOMIC_FETCH_SUB_16] = unsupported; + + Table[RTLIB::ATOMIC_FETCH_AND_1] = unsupported; + Table[RTLIB::ATOMIC_FETCH_AND_2] = unsupported; + Table[RTLIB::ATOMIC_FETCH_AND_4] = unsupported; + Table[RTLIB::ATOMIC_FETCH_AND_8] = unsupported; + Table[RTLIB::ATOMIC_FETCH_AND_16] = unsupported; + + Table[RTLIB::ATOMIC_FETCH_OR_1] = unsupported; + Table[RTLIB::ATOMIC_FETCH_OR_2] = unsupported; + Table[RTLIB::ATOMIC_FETCH_OR_4] = unsupported; + Table[RTLIB::ATOMIC_FETCH_OR_8] = unsupported; + Table[RTLIB::ATOMIC_FETCH_OR_16] = unsupported; + + Table[RTLIB::ATOMIC_FETCH_XOR_1] = unsupported; + Table[RTLIB::ATOMIC_FETCH_XOR_2] = unsupported; + 
Table[RTLIB::ATOMIC_FETCH_XOR_4] = unsupported; + Table[RTLIB::ATOMIC_FETCH_XOR_8] = unsupported; + Table[RTLIB::ATOMIC_FETCH_XOR_16] = unsupported; + + Table[RTLIB::ATOMIC_FETCH_NAND_1] = unsupported; + Table[RTLIB::ATOMIC_FETCH_NAND_2] = unsupported; + Table[RTLIB::ATOMIC_FETCH_NAND_4] = unsupported; + Table[RTLIB::ATOMIC_FETCH_NAND_8] = unsupported; + Table[RTLIB::ATOMIC_FETCH_NAND_16] = unsupported; + } }; -static const char * -RuntimeLibcallNames[RTLIB::UNKNOWN_LIBCALL] = { -/* SHL_I16 */ "__ashlhi3", -/* SHL_I32 */ "__ashlsi3", -/* SHL_I64 */ "__ashldi3", -/* SHL_I128 */ "__ashlti3", -/* SRL_I16 */ "__lshrhi3", -/* SRL_I32 */ "__lshrsi3", -/* SRL_I64 */ "__lshrdi3", -/* SRL_I128 */ "__lshrti3", -/* SRA_I16 */ "__ashrhi3", -/* SRA_I32 */ "__ashrsi3", -/* SRA_I64 */ "__ashrdi3", -/* SRA_I128 */ "__ashrti3", -/* MUL_I8 */ "__mulqi3", -/* MUL_I16 */ "__mulhi3", -/* MUL_I32 */ "__mulsi3", -/* MUL_I64 */ "__muldi3", -/* MUL_I128 */ "__multi3", -/* MULO_I32 */ "__mulosi4", -/* MULO_I64 */ "__mulodi4", -/* MULO_I128 */ "__muloti4", -/* SDIV_I8 */ "__divqi3", -/* SDIV_I16 */ "__divhi3", -/* SDIV_I32 */ "__divsi3", -/* SDIV_I64 */ "__divdi3", -/* SDIV_I128 */ "__divti3", -/* UDIV_I8 */ "__udivqi3", -/* UDIV_I16 */ "__udivhi3", -/* UDIV_I32 */ "__udivsi3", -/* UDIV_I64 */ "__udivdi3", -/* UDIV_I128 */ "__udivti3", -/* SREM_I8 */ "__modqi3", -/* SREM_I16 */ "__modhi3", -/* SREM_I32 */ "__modsi3", -/* SREM_I64 */ "__moddi3", -/* SREM_I128 */ "__modti3", -/* UREM_I8 */ "__umodqi3", -/* UREM_I16 */ "__umodhi3", -/* UREM_I32 */ "__umodsi3", -/* UREM_I64 */ "__umoddi3", -/* UREM_I128 */ "__umodti3", -/* SDIVREM_I8 */ nullptr, -/* SDIVREM_I16 */ nullptr, -/* SDIVREM_I32 */ nullptr, -/* SDIVREM_I64 */ nullptr, -/* SDIVREM_I128 */ nullptr, -/* UDIVREM_I8 */ nullptr, -/* UDIVREM_I16 */ nullptr, -/* UDIVREM_I32 */ nullptr, -/* UDIVREM_I64 */ nullptr, -/* UDIVREM_I128 */ nullptr, -/* NEG_I32 */ "__negsi2", -/* NEG_I64 */ "__negdi2", -/* ADD_F32 */ "__addsf3", -/* ADD_F64 */ "__adddf3", -/* ADD_F80 */ nullptr, -/* ADD_F128 */ "__addtf3", -/* ADD_PPCF128 */ nullptr, -/* SUB_F32 */ "__subsf3", -/* SUB_F64 */ "__subdf3", -/* SUB_F80 */ nullptr, -/* SUB_F128 */ "__subtf3", -/* SUB_PPCF128 */ nullptr, -/* MUL_F32 */ "__mulsf3", -/* MUL_F64 */ "__muldf3", -/* MUL_F80 */ nullptr, -/* MUL_F128 */ "__multf3", -/* MUL_PPCF128 */ nullptr, -/* DIV_F32 */ "__divsf3", -/* DIV_F64 */ "__divdf3", -/* DIV_F80 */ nullptr, -/* DIV_F128 */ "__divtf3", -/* DIV_PPCF128 */ nullptr, -/* REM_F32 */ "fmodf", -/* REM_F64 */ "fmod", -/* REM_F80 */ nullptr, -/* REM_F128 */ "fmodl", -/* REM_PPCF128 */ nullptr, -/* FMA_F32 */ "fmaf", -/* FMA_F64 */ "fma", -/* FMA_F80 */ nullptr, -/* FMA_F128 */ "fmal", -/* FMA_PPCF128 */ nullptr, -/* POWI_F32 */ "__powisf2", -/* POWI_F64 */ "__powidf2", -/* POWI_F80 */ nullptr, -/* POWI_F128 */ "__powitf2", -/* POWI_PPCF128 */ nullptr, -/* SQRT_F32 */ "sqrtf", -/* SQRT_F64 */ "sqrt", -/* SQRT_F80 */ nullptr, -/* SQRT_F128 */ "sqrtl", -/* SQRT_PPCF128 */ nullptr, -/* LOG_F32 */ "logf", -/* LOG_F64 */ "log", -/* LOG_F80 */ nullptr, -/* LOG_F128 */ "logl", -/* LOG_PPCF128 */ nullptr, -/* LOG2_F32 */ "log2f", -/* LOG2_F64 */ "log2", -/* LOG2_F80 */ nullptr, -/* LOG2_F128 */ "log2l", -/* LOG2_PPCF128 */ nullptr, -/* LOG10_F32 */ "log10f", -/* LOG10_F64 */ "log10", -/* LOG10_F80 */ nullptr, -/* LOG10_F128 */ "log10l", -/* LOG10_PPCF128 */ nullptr, -/* EXP_F32 */ "expf", -/* EXP_F64 */ "exp", -/* EXP_F80 */ nullptr, -/* EXP_F128 */ "expl", -/* EXP_PPCF128 */ nullptr, -/* EXP2_F32 */ "exp2f", -/* EXP2_F64 */ 
"exp2", -/* EXP2_F80 */ nullptr, -/* EXP2_F128 */ "exp2l", -/* EXP2_PPCF128 */ nullptr, -/* SIN_F32 */ "sinf", -/* SIN_F64 */ "sin", -/* SIN_F80 */ nullptr, -/* SIN_F128 */ "sinl", -/* SIN_PPCF128 */ nullptr, -/* COS_F32 */ "cosf", -/* COS_F64 */ "cos", -/* COS_F80 */ nullptr, -/* COS_F128 */ "cosl", -/* COS_PPCF128 */ nullptr, -/* SINCOS_F32 */ "sincosf", -/* SINCOS_F64 */ "sincos", -/* SINCOS_F80 */ nullptr, -/* SINCOS_F128 */ "sincosl", -/* SINCOS_PPCF128 */ nullptr, -/* SINCOS_STRET_F32 */ nullptr, -/* SINCOS_STRET_F64 */ nullptr, -/* POW_F32 */ "powf", -/* POW_F64 */ "pow", -/* POW_F80 */ nullptr, -/* POW_F128 */ "powl", -/* POW_PPCF128 */ nullptr, -/* CEIL_F32 */ "ceilf", -/* CEIL_F64 */ "ceil", -/* CEIL_F80 */ nullptr, -/* CEIL_F128 */ "ceill", -/* CEIL_PPCF128 */ nullptr, -/* TRUNC_F32 */ "truncf", -/* TRUNC_F64 */ "trunc", -/* TRUNC_F80 */ nullptr, -/* TRUNC_F128 */ "truncl", -/* TRUNC_PPCF128 */ nullptr, -/* RINT_F32 */ "rintf", -/* RINT_F64 */ "rint", -/* RINT_F80 */ nullptr, -/* RINT_F128 */ "rintl", -/* RINT_PPCF128 */ nullptr, -/* NEARBYINT_F32 */ "nearbyintf", -/* NEARBYINT_F64 */ "nearbyint", -/* NEARBYINT_F80 */ nullptr, -/* NEARBYINT_F128 */ "nearbyintl", -/* NEARBYINT_PPCF128 */ nullptr, -/* ROUND_F32 */ "roundf", -/* ROUND_F64 */ "round", -/* ROUND_F80 */ nullptr, -/* ROUND_F128 */ "roundl", -/* ROUND_PPCF128 */ nullptr, -/* FLOOR_F32 */ "floorf", -/* FLOOR_F64 */ "floor", -/* FLOOR_F80 */ nullptr, -/* FLOOR_F128 */ "floorl", -/* FLOOR_PPCF128 */ nullptr, -/* COPYSIGN_F32 */ "copysignf", -/* COPYSIGN_F64 */ "copysign", -/* COPYSIGN_F80 */ nullptr, -/* COPYSIGN_F128 */ "copysignl", -/* COPYSIGN_PPCF128 */ nullptr, -/* FMIN_F32 */ "fminf", -/* FMIN_F64 */ "fmin", -/* FMIN_F80 */ nullptr, -/* FMIN_F128 */ "fminl", -/* FMIN_PPCF128 */ nullptr, -/* FMAX_F32 */ "fmaxf", -/* FMAX_F64 */ "fmax", -/* FMAX_F80 */ nullptr, -/* FMAX_F128 */ "fmaxl", -/* FMAX_PPCF128 */ nullptr, -/* FPEXT_F32_PPCF128 */ nullptr, -/* FPEXT_F64_PPCF128 */ nullptr, -/* FPEXT_F64_F128 */ "__extenddftf2", -/* FPEXT_F32_F128 */ "__extendsftf2", -/* FPEXT_F32_F64 */ "__extendsfdf2", -/* FPEXT_F16_F32 */ "__gnu_h2f_ieee", -/* FPROUND_F32_F16 */ "__gnu_f2h_ieee", -/* FPROUND_F64_F16 */ nullptr, -/* FPROUND_F80_F16 */ nullptr, -/* FPROUND_F128_F16 */ nullptr, -/* FPROUND_PPCF128_F16 */ nullptr, -/* FPROUND_F64_F32 */ "__truncdfsf2", -/* FPROUND_F80_F32 */ "__truncxfsf2", -/* FPROUND_F128_F32 */ "__trunctfsf2", -/* FPROUND_PPCF128_F32 */ nullptr, -/* FPROUND_F80_F64 */ "__truncxfdf2", -/* FPROUND_F128_F64 */ "__trunctfdf2", -/* FPROUND_PPCF128_F64 */ nullptr, -/* FPTOSINT_F32_I32 */ "__fixsfsi", -/* FPTOSINT_F32_I64 */ "__fixsfdi", -/* FPTOSINT_F32_I128 */ "__fixsfti", -/* FPTOSINT_F64_I32 */ "__fixdfsi", -/* FPTOSINT_F64_I64 */ "__fixdfdi", -/* FPTOSINT_F64_I128 */ "__fixdfti", -/* FPTOSINT_F80_I32 */ "__fixxfsi", -/* FPTOSINT_F80_I64 */ "__fixxfdi", -/* FPTOSINT_F80_I128 */ "__fixxfti", -/* FPTOSINT_F128_I32 */ "__fixtfsi", -/* FPTOSINT_F128_I64 */ "__fixtfdi", -/* FPTOSINT_F128_I128 */ "__fixtfti", -/* FPTOSINT_PPCF128_I32 */ nullptr, -/* FPTOSINT_PPCF128_I64 */ nullptr, -/* FPTOSINT_PPCF128_I128 */ nullptr, -/* FPTOUINT_F32_I32 */ "__fixunssfsi", -/* FPTOUINT_F32_I64 */ "__fixunssfdi", -/* FPTOUINT_F32_I128 */ "__fixunssfti", -/* FPTOUINT_F64_I32 */ "__fixunsdfsi", -/* FPTOUINT_F64_I64 */ "__fixunsdfdi", -/* FPTOUINT_F64_I128 */ "__fixunsdfti", -/* FPTOUINT_F80_I32 */ "__fixunsxfsi", -/* FPTOUINT_F80_I64 */ "__fixunsxfdi", -/* FPTOUINT_F80_I128 */ "__fixunsxfti", -/* FPTOUINT_F128_I32 */ "__fixunstfsi", 
-/* FPTOUINT_F128_I64 */ "__fixunstfdi", -/* FPTOUINT_F128_I128 */ "__fixunstfti", -/* FPTOUINT_PPCF128_I32 */ nullptr, -/* FPTOUINT_PPCF128_I64 */ nullptr, -/* FPTOUINT_PPCF128_I128 */ nullptr, -/* SINTTOFP_I32_F32 */ "__floatsisf", -/* SINTTOFP_I32_F64 */ "__floatsidf", -/* SINTTOFP_I32_F80 */ nullptr, -/* SINTTOFP_I32_F128 */ "__floatsitf", -/* SINTTOFP_I32_PPCF128 */ nullptr, -/* SINTTOFP_I64_F32 */ "__floatdisf", -/* SINTTOFP_I64_F64 */ "__floatdidf", -/* SINTTOFP_I64_F80 */ nullptr, -/* SINTTOFP_I64_F128 */ "__floatditf", -/* SINTTOFP_I64_PPCF128 */ nullptr, -/* SINTTOFP_I128_F32 */ "__floattisf", -/* SINTTOFP_I128_F64 */ "__floattidf", -/* SINTTOFP_I128_F80 */ nullptr, -/* SINTTOFP_I128_F128 */ "__floattitf", -/* SINTTOFP_I128_PPCF128 */ nullptr, -/* UINTTOFP_I32_F32 */ "__floatunsisf", -/* UINTTOFP_I32_F64 */ "__floatunsidf", -/* UINTTOFP_I32_F80 */ nullptr, -/* UINTTOFP_I32_F128 */ "__floatunsitf", -/* UINTTOFP_I32_PPCF128 */ nullptr, -/* UINTTOFP_I64_F32 */ "__floatundisf", -/* UINTTOFP_I64_F64 */ "__floatundidf", -/* UINTTOFP_I64_F80 */ nullptr, -/* UINTTOFP_I64_F128 */ "__floatunditf", -/* UINTTOFP_I64_PPCF128 */ nullptr, -/* UINTTOFP_I128_F32 */ "__floatuntisf", -/* UINTTOFP_I128_F64 */ "__floatuntidf", -/* UINTTOFP_I128_F80 */ nullptr, -/* UINTTOFP_I128_F128 */ "__floatuntitf", -/* UINTTOFP_I128_PPCF128 */ nullptr, -/* OEQ_F32 */ "__eqsf2", -/* OEQ_F64 */ "__eqdf2", -/* OEQ_F128 */ "__eqtf2", -/* OEQ_PPCF128 */ nullptr, -/* UNE_F32 */ "__nesf2", -/* UNE_F64 */ "__nedf2", -/* UNE_F128 */ "__netf2", -/* UNE_PPCF128 */ nullptr, -/* OGE_F32 */ "__gesf2", -/* OGE_F64 */ "__gedf2", -/* OGE_F128 */ "__getf2", -/* OGE_PPCF128 */ nullptr, -/* OLT_F32 */ "__ltsf2", -/* OLT_F64 */ "__ltdf2", -/* OLT_F128 */ "__lttf2", -/* OLT_PPCF128 */ nullptr, -/* OLE_F32 */ "__lesf2", -/* OLE_F64 */ "__ledf2", -/* OLE_F128 */ "__letf2", -/* OLE_PPCF128 */ nullptr, -/* OGT_F32 */ "__gtsf2", -/* OGT_F64 */ "__gtdf2", -/* OGT_F128 */ "__gttf2", -/* OGT_PPCF128 */ nullptr, -/* UO_F32 */ "__unordsf2", -/* UO_F64 */ "__unorddf2", -/* UO_F128 */ "__unordtf2", -/* UO_PPCF128 */ nullptr, -/* O_F32 */ "__unordsf2", -/* O_F64 */ "__unorddf2", -/* O_F128 */ "__unordtf2", -/* O_PPCF128 */ nullptr, -/* MEMCPY */ "memcpy", -/* MEMMOVE */ "memset", -/* MEMSET */ "memmove", -/* BZERO */ nullptr, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr, -/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr, -/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr, -/* MEMSET_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr, -/* UNWIND_RESUME */ "_Unwind_Resume", -/* SYNC_VAL_COMPARE_AND_SWAP_1 */ "__sync_val_compare_and_swap_1", -/* SYNC_VAL_COMPARE_AND_SWAP_2 */ "__sync_val_compare_and_swap_2", -/* SYNC_VAL_COMPARE_AND_SWAP_4 */ "__sync_val_compare_and_swap_4", -/* SYNC_VAL_COMPARE_AND_SWAP_8 */ "__sync_val_compare_and_swap_8", -/* SYNC_VAL_COMPARE_AND_SWAP_16 */ "__sync_val_compare_and_swap_16", -/* SYNC_LOCK_TEST_AND_SET_1 */ "__sync_lock_test_and_set_1", -/* SYNC_LOCK_TEST_AND_SET_2 */ 
"__sync_lock_test_and_set_2", -/* SYNC_LOCK_TEST_AND_SET_4 */ "__sync_lock_test_and_set_4", -/* SYNC_LOCK_TEST_AND_SET_8 */ "__sync_lock_test_and_set_8", -/* SYNC_LOCK_TEST_AND_SET_16 */ "__sync_lock_test_and_set_16", -/* SYNC_FETCH_AND_ADD_1 */ "__sync_fetch_and_add_1", -/* SYNC_FETCH_AND_ADD_2 */ "__sync_fetch_and_add_2", -/* SYNC_FETCH_AND_ADD_4 */ "__sync_fetch_and_add_4", -/* SYNC_FETCH_AND_ADD_8 */ "__sync_fetch_and_add_8", -/* SYNC_FETCH_AND_ADD_16 */ "__sync_fetch_and_add_16", -/* SYNC_FETCH_AND_SUB_1 */ "__sync_fetch_and_sub_1", -/* SYNC_FETCH_AND_SUB_2 */ "__sync_fetch_and_sub_2", -/* SYNC_FETCH_AND_SUB_4 */ "__sync_fetch_and_sub_4", -/* SYNC_FETCH_AND_SUB_8 */ "__sync_fetch_and_sub_8", -/* SYNC_FETCH_AND_SUB_16 */ "__sync_fetch_and_sub_16", -/* SYNC_FETCH_AND_AND_1 */ "__sync_fetch_and_and_1", -/* SYNC_FETCH_AND_AND_2 */ "__sync_fetch_and_and_2", -/* SYNC_FETCH_AND_AND_4 */ "__sync_fetch_and_and_4", -/* SYNC_FETCH_AND_AND_8 */ "__sync_fetch_and_and_8", -/* SYNC_FETCH_AND_AND_16 */ "__sync_fetch_and_and_16", -/* SYNC_FETCH_AND_OR_1 */ "__sync_fetch_and_or_1", -/* SYNC_FETCH_AND_OR_2 */ "__sync_fetch_and_or_2", -/* SYNC_FETCH_AND_OR_4 */ "__sync_fetch_and_or_4", -/* SYNC_FETCH_AND_OR_8 */ "__sync_fetch_and_or_8", -/* SYNC_FETCH_AND_OR_16 */ "__sync_fetch_and_or_16", -/* SYNC_FETCH_AND_XOR_1 */ "__sync_fetch_and_xor_1", -/* SYNC_FETCH_AND_XOR_2 */ "__sync_fetch_and_xor_2", -/* SYNC_FETCH_AND_XOR_4 */ "__sync_fetch_and_xor_4", -/* SYNC_FETCH_AND_XOR_8 */ "__sync_fetch_and_xor_8", -/* SYNC_FETCH_AND_XOR_16 */ "__sync_fetch_and_xor_16", -/* SYNC_FETCH_AND_NAND_1 */ "__sync_fetch_and_nand_1", -/* SYNC_FETCH_AND_NAND_2 */ "__sync_fetch_and_nand_2", -/* SYNC_FETCH_AND_NAND_4 */ "__sync_fetch_and_nand_4", -/* SYNC_FETCH_AND_NAND_8 */ "__sync_fetch_and_nand_8", -/* SYNC_FETCH_AND_NAND_16 */ "__sync_fetch_and_nand_16", -/* SYNC_FETCH_AND_MAX_1 */ "__sync_fetch_and_max_1", -/* SYNC_FETCH_AND_MAX_2 */ "__sync_fetch_and_max_2", -/* SYNC_FETCH_AND_MAX_4 */ "__sync_fetch_and_max_4", -/* SYNC_FETCH_AND_MAX_8 */ "__sync_fetch_and_max_8", -/* SYNC_FETCH_AND_MAX_16 */ "__sync_fetch_and_max_16", -/* SYNC_FETCH_AND_UMAX_1 */ "__sync_fetch_and_umax_1", -/* SYNC_FETCH_AND_UMAX_2 */ "__sync_fetch_and_umax_2", -/* SYNC_FETCH_AND_UMAX_4 */ "__sync_fetch_and_umax_4", -/* SYNC_FETCH_AND_UMAX_8 */ "__sync_fetch_and_umax_8", -/* SYNC_FETCH_AND_UMAX_16 */ "__sync_fetch_and_umax_16", -/* SYNC_FETCH_AND_MIN_1 */ "__sync_fetch_and_min_1", -/* SYNC_FETCH_AND_MIN_2 */ "__sync_fetch_and_min_2", -/* SYNC_FETCH_AND_MIN_4 */ "__sync_fetch_and_min_4", -/* SYNC_FETCH_AND_MIN_8 */ "__sync_fetch_and_min_8", -/* SYNC_FETCH_AND_MIN_16 */ "__sync_fetch_and_min_16", -/* SYNC_FETCH_AND_UMIN_1 */ "__sync_fetch_and_umin_1", -/* SYNC_FETCH_AND_UMIN_2 */ "__sync_fetch_and_umin_2", -/* SYNC_FETCH_AND_UMIN_4 */ "__sync_fetch_and_umin_4", -/* SYNC_FETCH_AND_UMIN_8 */ "__sync_fetch_and_umin_8", -/* SYNC_FETCH_AND_UMIN_16 */ "__sync_fetch_and_umin_16", - -/* ATOMIC_LOAD */ "__atomic_load", -/* ATOMIC_LOAD_1 */ "__atomic_load_1", -/* ATOMIC_LOAD_2 */ "__atomic_load_2", -/* ATOMIC_LOAD_4 */ "__atomic_load_4", -/* ATOMIC_LOAD_8 */ "__atomic_load_8", -/* ATOMIC_LOAD_16 */ "__atomic_load_16", - -/* ATOMIC_STORE */ "__atomic_store", -/* ATOMIC_STORE_1 */ "__atomic_store_1", -/* ATOMIC_STORE_2 */ "__atomic_store_2", -/* ATOMIC_STORE_4 */ "__atomic_store_4", -/* ATOMIC_STORE_8 */ "__atomic_store_8", -/* ATOMIC_STORE_16 */ "__atomic_store_16", - -/* ATOMIC_EXCHANGE */ "__atomic_exchange", -/* ATOMIC_EXCHANGE_1 */ "__atomic_exchange_1", -/* 
ATOMIC_EXCHANGE_2 */ "__atomic_exchange_2", -/* ATOMIC_EXCHANGE_4 */ "__atomic_exchange_4", -/* ATOMIC_EXCHANGE_8 */ "__atomic_exchange_8", -/* ATOMIC_EXCHANGE_16 */ "__atomic_exchange_16", - -/* ATOMIC_COMPARE_EXCHANGE */ "__atomic_compare_exchange", -/* ATOMIC_COMPARE_EXCHANGE_1 */ "__atomic_compare_exchange_1", -/* ATOMIC_COMPARE_EXCHANGE_2 */ "__atomic_compare_exchange_2", -/* ATOMIC_COMPARE_EXCHANGE_4 */ "__atomic_compare_exchange_4", -/* ATOMIC_COMPARE_EXCHANGE_8 */ "__atomic_compare_exchange_8", -/* ATOMIC_COMPARE_EXCHANGE_16 */ "__atomic_compare_exchange_16", +ManagedStatic<RuntimeLibcallSignatureTable> RuntimeLibcallSignatures; + +// Maps libcall names to their RTLIB::Libcall number. Builds the map in a +// constructor for use with ManagedStatic +struct StaticLibcallNameMap { + StringMap<RTLIB::Libcall> Map; + StaticLibcallNameMap() { + static const std::pair<const char *, RTLIB::Libcall> NameLibcalls[] = { +#define HANDLE_LIBCALL(code, name) {(const char *)name, RTLIB::code}, +#include "llvm/IR/RuntimeLibcalls.def" +#undef HANDLE_LIBCALL + }; + for (const auto &NameLibcall : NameLibcalls) { + if (NameLibcall.first != nullptr && + RuntimeLibcallSignatures->Table[NameLibcall.second] != unsupported) { + assert(Map.find(NameLibcall.first) == Map.end() && + "duplicate libcall names in name map"); + Map[NameLibcall.first] = NameLibcall.second; + } + } + } +}; -/* ATOMIC_FETCH_ADD_1 */ "__atomic_fetch_add_1", -/* ATOMIC_FETCH_ADD_2 */ "__atomic_fetch_add_2", -/* ATOMIC_FETCH_ADD_4 */ "__atomic_fetch_add_4", -/* ATOMIC_FETCH_ADD_8 */ "__atomic_fetch_add_8", -/* ATOMIC_FETCH_ADD_16 */ "__atomic_fetch_add_16", -/* ATOMIC_FETCH_SUB_1 */ "__atomic_fetch_sub_1", -/* ATOMIC_FETCH_SUB_2 */ "__atomic_fetch_sub_2", -/* ATOMIC_FETCH_SUB_4 */ "__atomic_fetch_sub_4", -/* ATOMIC_FETCH_SUB_8 */ "__atomic_fetch_sub_8", -/* ATOMIC_FETCH_SUB_16 */ "__atomic_fetch_sub_16", -/* ATOMIC_FETCH_AND_1 */ "__atomic_fetch_and_1", -/* ATOMIC_FETCH_AND_2 */ "__atomic_fetch_and_2", -/* ATOMIC_FETCH_AND_4 */ "__atomic_fetch_and_4", -/* ATOMIC_FETCH_AND_8 */ "__atomic_fetch_and_8", -/* ATOMIC_FETCH_AND_16 */ "__atomic_fetch_and_16", -/* ATOMIC_FETCH_OR_1 */ "__atomic_fetch_or_1", -/* ATOMIC_FETCH_OR_2 */ "__atomic_fetch_or_2", -/* ATOMIC_FETCH_OR_4 */ "__atomic_fetch_or_4", -/* ATOMIC_FETCH_OR_8 */ "__atomic_fetch_or_8", -/* ATOMIC_FETCH_OR_16 */ "__atomic_fetch_or_16", -/* ATOMIC_FETCH_XOR_1 */ "__atomic_fetch_xor_1", -/* ATOMIC_FETCH_XOR_2 */ "__atomic_fetch_xor_2", -/* ATOMIC_FETCH_XOR_4 */ "__atomic_fetch_xor_4", -/* ATOMIC_FETCH_XOR_8 */ "__atomic_fetch_xor_8", -/* ATOMIC_FETCH_XOR_16 */ "__atomic_fetch_xor_16", -/* ATOMIC_FETCH_NAND_1 */ "__atomic_fetch_nand_1", -/* ATOMIC_FETCH_NAND_2 */ "__atomic_fetch_nand_2", -/* ATOMIC_FETCH_NAND_4 */ "__atomic_fetch_nand_4", -/* ATOMIC_FETCH_NAND_8 */ "__atomic_fetch_nand_8", -/* ATOMIC_FETCH_NAND_16 */ "__atomic_fetch_nand_16", +} // end anonymous namespace -/* STACKPROTECTOR_CHECK_FAIL */ "__stack_chk_fail", -/* DEOPTIMIZE */ "__llvm_deoptimize", -}; void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, RTLIB::Libcall LC, SmallVectorImpl<wasm::ValType> &Rets, @@ -1003,11 +494,11 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, assert(Rets.empty()); assert(Params.empty()); - WebAssembly::ExprType iPTR = Subtarget.hasAddr64() ? - WebAssembly::ExprType::I64 : - WebAssembly::ExprType::I32; + wasm::ValType iPTR = + Subtarget.hasAddr64() ? 
wasm::ValType::I64 : wasm::ValType::I32; - switch (RuntimeLibcallSignatures[LC]) { + auto& Table = RuntimeLibcallSignatures->Table; + switch (Table[LC]) { case func: break; case f32_func_f32: @@ -1111,13 +602,13 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, break; case func_f32_iPTR_iPTR: Params.push_back(wasm::ValType::F32); - Params.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); + Params.push_back(iPTR); break; case func_f64_iPTR_iPTR: Params.push_back(wasm::ValType::F64); - Params.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); + Params.push_back(iPTR); break; case i16_func_i16_i16: Rets.push_back(wasm::ValType::I32); @@ -1139,17 +630,29 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Params.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); break; + case i32_func_i32_i32_iPTR: + Rets.push_back(wasm::ValType::I32); + Params.push_back(wasm::ValType::I32); + Params.push_back(wasm::ValType::I32); + Params.push_back(iPTR); + break; case i64_func_i64_i64: Rets.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; + case i64_func_i64_i64_iPTR: + Rets.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + Params.push_back(iPTR); + break; case i64_i64_func_f32: #if 0 // TODO: Enable this when wasm gets multiple-return-value support. Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::F32); break; @@ -1158,7 +661,7 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::F64); break; @@ -1167,7 +670,7 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I32); Rets.push_back(wasm::ValType::I32); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); @@ -1177,7 +680,7 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I32); Rets.push_back(wasm::ValType::I32); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::I32); Params.push_back(wasm::ValType::I32); @@ -1187,7 +690,7 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -1197,13 +700,26 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; + case i64_i64_func_i64_i64_i64_i64_iPTR: +#if 0 // TODO: Enable this when wasm gets multiple-return-value support. 
+ Rets.push_back(wasm::ValType::I64); + Rets.push_back(wasm::ValType::I64); +#else + Params.push_back(iPTR); +#endif + Params.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + Params.push_back(wasm::ValType::I64); + Params.push_back(iPTR); + break; case i64_i64_i64_i64_func_i64_i64_i64_i64: #if 0 // TODO: Enable this when wasm gets multiple-return-value support. Rets.push_back(wasm::ValType::I64); @@ -1211,7 +727,7 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -1225,23 +741,23 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, Rets.push_back(wasm::ValType::I64); Rets.push_back(wasm::ValType::I64); #else - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); #endif Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I32); break; case iPTR_func_iPTR_i32_iPTR: - Rets.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); + Rets.push_back(iPTR); + Params.push_back(iPTR); Params.push_back(wasm::ValType::I32); - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); break; case iPTR_func_iPTR_iPTR_iPTR: - Rets.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); + Rets.push_back(iPTR); + Params.push_back(iPTR); + Params.push_back(iPTR); + Params.push_back(iPTR); break; case f32_func_f32_f32_f32: Rets.push_back(wasm::ValType::F32); @@ -1258,39 +774,39 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, case func_i64_i64_iPTR_iPTR: Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); - Params.push_back(wasm::ValType(iPTR)); - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); + Params.push_back(iPTR); break; case func_iPTR_f32: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::F32); break; case func_iPTR_f64: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::F64); break; case func_iPTR_i32: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::I32); break; case func_iPTR_i64: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::I64); break; case func_iPTR_i64_i64: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; case func_iPTR_i64_i64_i64_i64: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); break; case func_iPTR_i64_i64_i64_i64_i64_i64: - Params.push_back(wasm::ValType(iPTR)); + Params.push_back(iPTR); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); Params.push_back(wasm::ValType::I64); @@ -1315,15 +831,14 @@ void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, } } +static ManagedStatic<StaticLibcallNameMap> LibcallNameMap; +// TODO: If the RTLIB::Libcall-taking flavor of GetSignature remains unused +// other than here, just roll its logic 
into this version. void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, const char *Name, SmallVectorImpl<wasm::ValType> &Rets, SmallVectorImpl<wasm::ValType> &Params) { - assert(strcmp(RuntimeLibcallNames[RTLIB::DEOPTIMIZE], "__llvm_deoptimize") == - 0); - - for (size_t i = 0, e = RTLIB::UNKNOWN_LIBCALL; i < e; ++i) - if (RuntimeLibcallNames[i] && strcmp(RuntimeLibcallNames[i], Name) == 0) - return GetSignature(Subtarget, RTLIB::Libcall(i), Rets, Params); - - llvm_unreachable("unexpected runtime library name"); + auto& Map = LibcallNameMap->Map; + auto val = Map.find(Name); + assert(val != Map.end() && "unexpected runtime library name"); + return GetSignature(Subtarget, val->second, Rets, Params); } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h index 129067604784..2ba65ff5b716 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file provides signature information for runtime libcalls. +/// This file provides signature information for runtime libcalls. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp index fae9c6100510..bec72049258a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssemblySelectionDAGInfo class. +/// This file implements the WebAssemblySelectionDAGInfo class. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h index 533c66b7a22f..31d150eded67 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly subclass for +/// This file defines the WebAssembly subclass for /// SelectionDAGTargetInfo. /// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp index c4b9e915b41e..14221993603a 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file sets the p2align operands on load and store instructions. +/// This file sets the p2align operands on load and store instructions. 
/// //===----------------------------------------------------------------------===// @@ -46,6 +46,10 @@ public: } // end anonymous namespace char WebAssemblySetP2AlignOperands::ID = 0; +INITIALIZE_PASS(WebAssemblySetP2AlignOperands, DEBUG_TYPE, + "Set the p2align operands for WebAssembly loads and stores", + false, false) + FunctionPass *llvm::createWebAssemblySetP2AlignOperands() { return new WebAssemblySetP2AlignOperands(); } @@ -72,7 +76,7 @@ static void RewriteP2Align(MachineInstr &MI, unsigned OperandNo) { } bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Set p2align Operands **********\n" << "********** Function: " << MF.getName() << '\n'; }); @@ -103,6 +107,48 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) { case WebAssembly::ATOMIC_LOAD8_U_I64: case WebAssembly::ATOMIC_LOAD16_U_I64: case WebAssembly::ATOMIC_LOAD32_U_I64: + case WebAssembly::ATOMIC_RMW8_U_ADD_I32: + case WebAssembly::ATOMIC_RMW8_U_ADD_I64: + case WebAssembly::ATOMIC_RMW8_U_SUB_I32: + case WebAssembly::ATOMIC_RMW8_U_SUB_I64: + case WebAssembly::ATOMIC_RMW8_U_AND_I32: + case WebAssembly::ATOMIC_RMW8_U_AND_I64: + case WebAssembly::ATOMIC_RMW8_U_OR_I32: + case WebAssembly::ATOMIC_RMW8_U_OR_I64: + case WebAssembly::ATOMIC_RMW8_U_XOR_I32: + case WebAssembly::ATOMIC_RMW8_U_XOR_I64: + case WebAssembly::ATOMIC_RMW8_U_XCHG_I32: + case WebAssembly::ATOMIC_RMW8_U_XCHG_I64: + case WebAssembly::ATOMIC_RMW16_U_ADD_I32: + case WebAssembly::ATOMIC_RMW16_U_ADD_I64: + case WebAssembly::ATOMIC_RMW16_U_SUB_I32: + case WebAssembly::ATOMIC_RMW16_U_SUB_I64: + case WebAssembly::ATOMIC_RMW16_U_AND_I32: + case WebAssembly::ATOMIC_RMW16_U_AND_I64: + case WebAssembly::ATOMIC_RMW16_U_OR_I32: + case WebAssembly::ATOMIC_RMW16_U_OR_I64: + case WebAssembly::ATOMIC_RMW16_U_XOR_I32: + case WebAssembly::ATOMIC_RMW16_U_XOR_I64: + case WebAssembly::ATOMIC_RMW16_U_XCHG_I32: + case WebAssembly::ATOMIC_RMW16_U_XCHG_I64: + case WebAssembly::ATOMIC_RMW_ADD_I32: + case WebAssembly::ATOMIC_RMW32_U_ADD_I64: + case WebAssembly::ATOMIC_RMW_SUB_I32: + case WebAssembly::ATOMIC_RMW32_U_SUB_I64: + case WebAssembly::ATOMIC_RMW_AND_I32: + case WebAssembly::ATOMIC_RMW32_U_AND_I64: + case WebAssembly::ATOMIC_RMW_OR_I32: + case WebAssembly::ATOMIC_RMW32_U_OR_I64: + case WebAssembly::ATOMIC_RMW_XOR_I32: + case WebAssembly::ATOMIC_RMW32_U_XOR_I64: + case WebAssembly::ATOMIC_RMW_XCHG_I32: + case WebAssembly::ATOMIC_RMW32_U_XCHG_I64: + case WebAssembly::ATOMIC_RMW_ADD_I64: + case WebAssembly::ATOMIC_RMW_SUB_I64: + case WebAssembly::ATOMIC_RMW_AND_I64: + case WebAssembly::ATOMIC_RMW_OR_I64: + case WebAssembly::ATOMIC_RMW_XOR_I64: + case WebAssembly::ATOMIC_RMW_XCHG_I64: RewriteP2Align(MI, WebAssembly::LoadP2AlignOperandNo); break; case WebAssembly::STORE_I32: @@ -114,6 +160,13 @@ bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) { case WebAssembly::STORE8_I64: case WebAssembly::STORE16_I64: case WebAssembly::STORE32_I64: + case WebAssembly::ATOMIC_STORE_I32: + case WebAssembly::ATOMIC_STORE8_I32: + case WebAssembly::ATOMIC_STORE16_I32: + case WebAssembly::ATOMIC_STORE_I64: + case WebAssembly::ATOMIC_STORE8_I64: + case WebAssembly::ATOMIC_STORE16_I64: + case WebAssembly::ATOMIC_STORE32_I64: RewriteP2Align(MI, WebAssembly::StoreP2AlignOperandNo); break; default: diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp index 22a5a9099e72..893e8484c4c6 100644 
--- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements an optimization pass using store result values. +/// This file implements an optimization pass using store result values. /// /// WebAssembly's store instructions return the stored value. This is to enable /// an optimization wherein uses of the stored value can be replaced by uses of @@ -68,6 +68,9 @@ private: } // end anonymous namespace char WebAssemblyStoreResults::ID = 0; +INITIALIZE_PASS(WebAssemblyStoreResults, DEBUG_TYPE, + "Optimize store result values for WebAssembly", false, false) + FunctionPass *llvm::createWebAssemblyStoreResults() { return new WebAssemblyStoreResults(); } @@ -108,8 +111,8 @@ static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI, continue; Changed = true; - DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from " - << MI << "\n"); + LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from " + << MI << "\n"); O.setReg(ToReg); // If the store's def was previously dead, it is no longer. @@ -167,7 +170,7 @@ static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI, } bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) { - DEBUG({ + LLVM_DEBUG({ dbgs() << "********** Store Results **********\n" << "********** Function: " << MF.getName() << '\n'; }); @@ -186,7 +189,7 @@ bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) { assert(MRI.tracksLiveness() && "StoreResults expects liveness tracking"); for (auto &MBB : MF) { - DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n'); + LLVM_DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n'); for (auto &MI : MBB) switch (MI.getOpcode()) { default: diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp index 9e122a5f1574..d6af0fb219d7 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements the WebAssembly-specific subclass of +/// This file implements the WebAssembly-specific subclass of /// TargetSubtarget. 
/// //===----------------------------------------------------------------------===// @@ -41,9 +41,9 @@ WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT, const std::string &FS, const TargetMachine &TM) : WebAssemblyGenSubtargetInfo(TT, CPU, FS), HasSIMD128(false), - HasAtomics(false), HasNontrappingFPToInt(false), CPUString(CPU), - TargetTriple(TT), FrameLowering(), - InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(), + HasAtomics(false), HasNontrappingFPToInt(false), HasSignExt(false), + HasExceptionHandling(false), CPUString(CPU), TargetTriple(TT), + FrameLowering(), InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(), TLInfo(TM, *this) {} bool WebAssemblySubtarget::enableMachineScheduler() const { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h index a6bf0b6d54f6..b170dbff3b32 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the WebAssembly-specific subclass of +/// This file declares the WebAssembly-specific subclass of /// TargetSubtarget. /// //===----------------------------------------------------------------------===// @@ -32,6 +32,8 @@ class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo { bool HasSIMD128; bool HasAtomics; bool HasNontrappingFPToInt; + bool HasSignExt; + bool HasExceptionHandling; /// String name of used CPU. std::string CPUString; @@ -78,6 +80,8 @@ public: bool hasSIMD128() const { return HasSIMD128; } bool hasAtomics() const { return HasAtomics; } bool hasNontrappingFPToInt() const { return HasNontrappingFPToInt; } + bool hasSignExt() const { return HasSignExt; } + bool hasExceptionHandling() const { return HasExceptionHandling; } /// Parses features string setting specified subtarget options. Definition of /// function is auto generated by tblgen. diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp index d38cde74d2ec..7c10f022cbbc 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly-specific subclass of TargetMachine. +/// This file defines the WebAssembly-specific subclass of TargetMachine. 
/// //===----------------------------------------------------------------------===// @@ -25,6 +25,7 @@ #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Utils.h" using namespace llvm; #define DEBUG_TYPE "wasm" @@ -48,9 +49,31 @@ extern "C" void LLVMInitializeWebAssemblyTarget() { RegisterTargetMachine<WebAssemblyTargetMachine> Y( getTheWebAssemblyTarget64()); - // Register exception handling pass to opt - initializeWebAssemblyLowerEmscriptenEHSjLjPass( - *PassRegistry::getPassRegistry()); + // Register backend passes + auto &PR = *PassRegistry::getPassRegistry(); + initializeWebAssemblyAddMissingPrototypesPass(PR); + initializeWebAssemblyLowerEmscriptenEHSjLjPass(PR); + initializeLowerGlobalDtorsPass(PR); + initializeFixFunctionBitcastsPass(PR); + initializeOptimizeReturnedPass(PR); + initializeWebAssemblyArgumentMovePass(PR); + initializeWebAssemblySetP2AlignOperandsPass(PR); + initializeWebAssemblyReplacePhysRegsPass(PR); + initializeWebAssemblyPrepareForLiveIntervalsPass(PR); + initializeWebAssemblyOptimizeLiveIntervalsPass(PR); + initializeWebAssemblyStoreResultsPass(PR); + initializeWebAssemblyRegStackifyPass(PR); + initializeWebAssemblyRegColoringPass(PR); + initializeWebAssemblyExplicitLocalsPass(PR); + initializeWebAssemblyFixIrreducibleControlFlowPass(PR); + initializeWebAssemblyLateEHPreparePass(PR); + initializeWebAssemblyExceptionInfoPass(PR); + initializeWebAssemblyCFGSortPass(PR); + initializeWebAssemblyCFGStackifyPass(PR); + initializeWebAssemblyLowerBrUnlessPass(PR); + initializeWebAssemblyRegNumberingPass(PR); + initializeWebAssemblyPeepholePass(PR); + initializeWebAssemblyCallIndirectFixupPass(PR); } //===----------------------------------------------------------------------===// @@ -74,11 +97,7 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine( : "e-m:e-p:32:32-i64:64-n32:64-S128", TT, CPU, FS, Options, getEffectiveRelocModel(RM), CM ? *CM : CodeModel::Large, OL), - TLOF(TT.isOSBinFormatELF() ? - static_cast<TargetLoweringObjectFile*>( - new WebAssemblyTargetObjectFileELF()) : - static_cast<TargetLoweringObjectFile*>( - new WebAssemblyTargetObjectFile())) { + TLOF(new WebAssemblyTargetObjectFile()) { // WebAssembly type-checks instructions, but a noreturn function with a return // type that doesn't match the context will cause a check failure. So we lower // LLVM 'unreachable' to ISD::TRAP and then lower that to WebAssembly's @@ -87,11 +106,9 @@ WebAssemblyTargetMachine::WebAssemblyTargetMachine( // WebAssembly treats each function as an independent unit. Force // -ffunction-sections, effectively, so that we can emit them independently. - if (!TT.isOSBinFormatELF()) { - this->Options.FunctionSections = true; - this->Options.DataSections = true; - this->Options.UniqueSectionNames = true; - } + this->Options.FunctionSections = true; + this->Options.DataSections = true; + this->Options.UniqueSectionNames = true; initAsmInfo(); @@ -126,6 +143,22 @@ WebAssemblyTargetMachine::getSubtargetImpl(const Function &F) const { } namespace { +class StripThreadLocal final : public ModulePass { + // The default thread model for wasm is single, where thread-local variables + // are identical to regular globals and should be treated the same. 
So this + // pass just converts all GlobalVariables to NotThreadLocal + static char ID; + + public: + StripThreadLocal() : ModulePass(ID) {} + bool runOnModule(Module &M) override { + for (auto &GV : M.globals()) + GV.setThreadLocalMode(GlobalValue::ThreadLocalMode::NotThreadLocal); + return true; + } +}; +char StripThreadLocal::ID = 0; + /// WebAssembly Code Generator Pass Configuration Options. class WebAssemblyPassConfig final : public TargetPassConfig { public: @@ -166,13 +199,18 @@ FunctionPass *WebAssemblyPassConfig::createTargetRegisterAllocator(bool) { //===----------------------------------------------------------------------===// void WebAssemblyPassConfig::addIRPasses() { - if (TM->Options.ThreadModel == ThreadModel::Single) + if (TM->Options.ThreadModel == ThreadModel::Single) { // In "single" mode, atomics get lowered to non-atomics. addPass(createLowerAtomicPass()); - else + addPass(new StripThreadLocal()); + } else { // Expand some atomic operations. WebAssemblyTargetLowering has hooks which // control specifically what gets lowered. addPass(createAtomicExpandPass()); + } + + // Add signatures to prototype-less function declarations + addPass(createWebAssemblyAddMissingPrototypes()); // Lower .llvm.global_dtors into .llvm_global_ctors with __cxa_atexit calls. addPass(createWebAssemblyLowerGlobalDtors()); @@ -190,7 +228,8 @@ void WebAssemblyPassConfig::addIRPasses() { // blocks. Lowering invokes when there is no EH support is done in // TargetPassConfig::addPassesToHandleExceptions, but this runs after this // function and SjLj handling expects all invokes to be lowered before. - if (!EnableEmException) { + if (!EnableEmException && + TM->Options.ExceptionModel == ExceptionHandling::None) { addPass(createLowerInvokePass()); // The lower invoke pass may create unreachable code. Remove it in order not // to process dead blocks in setjmp/longjmp handling. @@ -225,16 +264,15 @@ void WebAssemblyPassConfig::addPostRegAlloc() { // virtual registers. Consider removing their restrictions and re-enabling // them. - // Has no asserts of its own, but was not written to handle virtual regs. - disablePass(&ShrinkWrapID); - // These functions all require the NoVRegs property. disablePass(&MachineCopyPropagationID); + disablePass(&PostRAMachineSinkingID); disablePass(&PostRASchedulerID); disablePass(&FuncletLayoutID); disablePass(&StackMapLivenessID); disablePass(&LiveDebugValuesID); disablePass(&PatchableFunctionID); + disablePass(&ShrinkWrapID); TargetPassConfig::addPostRegAlloc(); } @@ -282,6 +320,9 @@ void WebAssemblyPassConfig::addPreEmitPass() { // Insert explicit get_local and set_local operators. addPass(createWebAssemblyExplicitLocals()); + // Do various transformations for exception handling + addPass(createWebAssemblyLateEHPrepare()); + // Sort the blocks of the CFG into topological order, a prerequisite for // BLOCK and LOOP markers. addPass(createWebAssemblyCFGSort()); diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h index dd826befd117..41001e7a0cc7 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the WebAssembly-specific subclass of +/// This file declares the WebAssembly-specific subclass of /// TargetMachine. 
/// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp index b1fd108bc249..0459bfca418d 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp @@ -8,20 +8,15 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the functions of the WebAssembly-specific subclass +/// This file defines the functions of the WebAssembly-specific subclass /// of TargetLoweringObjectFile. /// //===----------------------------------------------------------------------===// #include "WebAssemblyTargetObjectFile.h" #include "WebAssemblyTargetMachine.h" -using namespace llvm; -void WebAssemblyTargetObjectFileELF::Initialize(MCContext &Ctx, - const TargetMachine &TM) { - TargetLoweringObjectFileELF::Initialize(Ctx, TM); - InitializeELF(TM.Options.UseInitArray); -} +using namespace llvm; void WebAssemblyTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM) { diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h index ace87c9e442f..ce744ba8b8e8 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file declares the WebAssembly-specific subclass of +/// This file declares the WebAssembly-specific subclass of /// TargetLoweringObjectFile. /// //===----------------------------------------------------------------------===// @@ -20,12 +20,6 @@ namespace llvm { -class WebAssemblyTargetObjectFileELF final - : public TargetLoweringObjectFileELF { -public: - void Initialize(MCContext &Ctx, const TargetMachine &TM) override; -}; - class WebAssemblyTargetObjectFile final : public TargetLoweringObjectFileWasm { public: void Initialize(MCContext &Ctx, const TargetMachine &TM) override; diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp index 2e002781f43d..4a2777cc3a9f 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file defines the WebAssembly-specific TargetTransformInfo +/// This file defines the WebAssembly-specific TargetTransformInfo /// implementation. 
/// //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h index 7b35fc916133..4300ca3defbf 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file a TargetTransformInfo::Concept conforming object specific +/// This file a TargetTransformInfo::Concept conforming object specific /// to the WebAssembly target machine. /// /// It uses the target's detailed information to provide more precise answers to diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp index e32772d491cf..5944cea5abd1 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file implements several utility functions for WebAssembly. +/// This file implements several utility functions for WebAssembly. /// //===----------------------------------------------------------------------===// @@ -18,6 +18,13 @@ #include "llvm/CodeGen/MachineLoopInfo.h" using namespace llvm; +const char *const WebAssembly::ClangCallTerminateFn = "__clang_call_terminate"; +const char *const WebAssembly::CxaBeginCatchFn = "__cxa_begin_catch"; +const char *const WebAssembly::CxaRethrowFn = "__cxa_rethrow"; +const char *const WebAssembly::StdTerminateFn = "_ZSt9terminatev"; +const char *const WebAssembly::PersonalityWrapperFn = + "_Unwind_Wasm_CallPersonality"; + bool WebAssembly::isArgument(const MachineInstr &MI) { switch (MI.getOpcode()) { case WebAssembly::ARGUMENT_I32: @@ -71,6 +78,24 @@ bool WebAssembly::isChild(const MachineInstr &MI, MFI.isVRegStackified(Reg); } +bool WebAssembly::isCallDirect(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::CALL_VOID: + case WebAssembly::CALL_I32: + case WebAssembly::CALL_I64: + case WebAssembly::CALL_F32: + case WebAssembly::CALL_F64: + case WebAssembly::CALL_v16i8: + case WebAssembly::CALL_v8i16: + case WebAssembly::CALL_v4i32: + case WebAssembly::CALL_v4f32: + case WebAssembly::CALL_EXCEPT_REF: + return true; + default: + return false; + } +} + bool WebAssembly::isCallIndirect(const MachineInstr &MI) { switch (MI.getOpcode()) { case WebAssembly::CALL_INDIRECT_VOID: @@ -82,16 +107,136 @@ bool WebAssembly::isCallIndirect(const MachineInstr &MI) { case WebAssembly::CALL_INDIRECT_v8i16: case WebAssembly::CALL_INDIRECT_v4i32: case WebAssembly::CALL_INDIRECT_v4f32: + case WebAssembly::CALL_INDIRECT_EXCEPT_REF: + return true; + default: + return false; + } +} + +unsigned WebAssembly::getCalleeOpNo(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::CALL_VOID: + case WebAssembly::CALL_INDIRECT_VOID: + return 0; + case WebAssembly::CALL_I32: + case WebAssembly::CALL_I64: + case WebAssembly::CALL_F32: + case WebAssembly::CALL_F64: + case WebAssembly::CALL_EXCEPT_REF: + case WebAssembly::CALL_INDIRECT_I32: + case WebAssembly::CALL_INDIRECT_I64: + case WebAssembly::CALL_INDIRECT_F32: + case WebAssembly::CALL_INDIRECT_F64: + case WebAssembly::CALL_INDIRECT_EXCEPT_REF: + return 1; + 
default: + llvm_unreachable("Not a call instruction"); + } +} + +bool WebAssembly::isMarker(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::BLOCK: + case WebAssembly::END_BLOCK: + case WebAssembly::LOOP: + case WebAssembly::END_LOOP: + case WebAssembly::TRY: + case WebAssembly::END_TRY: + return true; + default: + return false; + } +} + +bool WebAssembly::isThrow(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::THROW_I32: + case WebAssembly::THROW_I64: return true; default: return false; } } -MachineBasicBlock *llvm::LoopBottom(const MachineLoop *Loop) { - MachineBasicBlock *Bottom = Loop->getHeader(); - for (MachineBasicBlock *MBB : Loop->blocks()) - if (MBB->getNumber() > Bottom->getNumber()) - Bottom = MBB; - return Bottom; +bool WebAssembly::isRethrow(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::RETHROW: + case WebAssembly::RETHROW_TO_CALLER: + return true; + default: + return false; + } +} + +bool WebAssembly::isCatch(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::CATCH_I32: + case WebAssembly::CATCH_I64: + case WebAssembly::CATCH_ALL: + return true; + default: + return false; + } +} + +bool WebAssembly::mayThrow(const MachineInstr &MI) { + switch (MI.getOpcode()) { + case WebAssembly::THROW_I32: + case WebAssembly::THROW_I64: + case WebAssembly::RETHROW: + return true; + } + if (isCallIndirect(MI)) + return true; + if (!MI.isCall()) + return false; + + const MachineOperand &MO = MI.getOperand(getCalleeOpNo(MI)); + assert(MO.isGlobal()); + const auto *F = dyn_cast<Function>(MO.getGlobal()); + if (!F) + return true; + if (F->doesNotThrow()) + return false; + // These functions never throw + if (F->getName() == CxaBeginCatchFn || F->getName() == PersonalityWrapperFn || + F->getName() == ClangCallTerminateFn || F->getName() == StdTerminateFn) + return false; + return true; +} + +bool WebAssembly::isCatchTerminatePad(const MachineBasicBlock &MBB) { + if (!MBB.isEHPad()) + return false; + bool SeenCatch = false; + for (auto &MI : MBB) { + if (MI.getOpcode() == WebAssembly::CATCH_I32 || + MI.getOpcode() == WebAssembly::CATCH_I64) + SeenCatch = true; + if (SeenCatch && MI.isCall()) { + const MachineOperand &CalleeOp = MI.getOperand(getCalleeOpNo(MI)); + if (CalleeOp.isGlobal() && + CalleeOp.getGlobal()->getName() == ClangCallTerminateFn) + return true; + } + } + return false; +} + +bool WebAssembly::isCatchAllTerminatePad(const MachineBasicBlock &MBB) { + if (!MBB.isEHPad()) + return false; + bool SeenCatchAll = false; + for (auto &MI : MBB) { + if (MI.getOpcode() == WebAssembly::CATCH_ALL) + SeenCatchAll = true; + if (SeenCatchAll && MI.isCall()) { + const MachineOperand &CalleeOp = MI.getOperand(getCalleeOpNo(MI)); + if (CalleeOp.isGlobal() && + CalleeOp.getGlobal()->getName() == StdTerminateFn) + return true; + } + } + return false; } diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h index 595491f1bf5b..cdb7873e9013 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// /// /// \file -/// \brief This file contains the declaration of the WebAssembly-specific +/// This file contains the declaration of the WebAssembly-specific /// utility functions. 
/// //===----------------------------------------------------------------------===// @@ -16,11 +16,10 @@ #ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYUTILITIES_H #define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYUTILITIES_H +#include "llvm/CodeGen/MachineBasicBlock.h" + namespace llvm { -class MachineBasicBlock; -class MachineInstr; -class MachineLoop; class WebAssemblyFunctionInfo; namespace WebAssembly { @@ -29,14 +28,44 @@ bool isArgument(const MachineInstr &MI); bool isCopy(const MachineInstr &MI); bool isTee(const MachineInstr &MI); bool isChild(const MachineInstr &MI, const WebAssemblyFunctionInfo &MFI); +bool isCallDirect(const MachineInstr &MI); bool isCallIndirect(const MachineInstr &MI); +bool isMarker(const MachineInstr &MI); +bool isThrow(const MachineInstr &MI); +bool isRethrow(const MachineInstr &MI); +bool isCatch(const MachineInstr &MI); +bool mayThrow(const MachineInstr &MI); -} // end namespace WebAssembly +/// Returns the operand number of a callee, assuming the argument is a call +/// instruction. +unsigned getCalleeOpNo(const MachineInstr &MI); + +/// Returns if the given BB is a single BB terminate pad which starts with a +/// 'catch' instruction. +bool isCatchTerminatePad(const MachineBasicBlock &MBB); +/// Returns if the given BB is a single BB terminate pad which starts with a +/// 'catch_all' instruction. +bool isCatchAllTerminatePad(const MachineBasicBlock &MBB); -/// Return the "bottom" block of a loop. This differs from -/// MachineLoop::getBottomBlock in that it works even if the loop is -/// discontiguous. -MachineBasicBlock *LoopBottom(const MachineLoop *Loop); +// Exception-related function names +extern const char *const ClangCallTerminateFn; +extern const char *const CxaBeginCatchFn; +extern const char *const CxaRethrowFn; +extern const char *const StdTerminateFn; +extern const char *const PersonalityWrapperFn; + +/// Return the "bottom" block of an entity, which can be either a MachineLoop or +/// WebAssemblyException. This differs from MachineLoop::getBottomBlock in that +/// it works even if the entity is discontiguous. +template <typename T> MachineBasicBlock *getBottom(const T *Unit) { + MachineBasicBlock *Bottom = Unit->getHeader(); + for (MachineBasicBlock *MBB : Unit->blocks()) + if (MBB->getNumber() > Bottom->getNumber()) + Bottom = MBB; + return Bottom; +} + +} // end namespace WebAssembly } // end namespace llvm diff --git a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt index 2eb73befc50b..364c871f61b0 100644 --- a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt +++ b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt @@ -5,22 +5,22 @@ # they pass. (Known failures that do not run at all will not cause an # error). The format is # <name> <attributes> # comment -# -# The attributes in this case represent the different arguments used to -# compiler: 'wasm-s' is for compiling to .s files, and 'wasm-o' for compiling -# to wasm object files (.o). # Computed gotos are not supported (Cannot select BlockAddress/BRIND) -20071220-1.c wasm-o,O0 +20071220-1.c +20071220-2.c 20040302-1.c 20041214-1.c O0 20071210-1.c -20071220-1.c wasm-s,O0 920501-4.c 920501-5.c comp-goto-1.c 980526-1.c 990208-1.c +label13.C O0 +label13a.C O0 +label3.C +pr42462.C O0 # WebAssembly hasn't implemented (will never?) 
__builtin_return_address 20010122-1.c @@ -76,6 +76,44 @@ pr41935.c 920728-1.c pr28865.c widechar-2.c +attr-alias-1.C +attr-alias-2.C +attr-ifunc-1.C +attr-ifunc-2.C +attr-ifunc-3.C +attr-ifunc-4.C +complit12.C +va-arg-pack-1.C +va-arg-pack-len-1.C +builtin-line1.C +builtin-location.C +devirt-6.C # bad main signature +devirt-13.C # bad main signature +devirt-14.C # bad main signature +devirt-21.C # bad main signature +devirt-23.C # bad main signature +lifetime2.C # violates C++ DR1696 -# Untriaged: Assertion failure in WasmObjectWriter::applyRelocations -20071220-2.c wasm-o,O0 +# Untriaged C++ failures +spec5.C +addr1.C +ef_test.C +friend18.C +member2.C +new39.C +new40.C +nrv8.C +offsetof9.C +opaque-1.C +pr19650.C +pr37146-1.C +pr46149.C +pr59470.C +rtti2.C +self1.C +type-generic-1.C +vbase8-10.C +vbase8-21.C +vbase8-22.C +vbase8-4.C +vector1.C
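
Note on the WebAssemblyRuntimeLibcallSignatures.cpp hunks earlier in this diff: the patch drops the hand-maintained RuntimeLibcallNames array, and with it the linear strcmp scan in the name-based GetSignature overload, in favour of a StringMap<RTLIB::Libcall> built once inside a ManagedStatic from llvm/IR/RuntimeLibcalls.def (null names and entries whose signature is marked unsupported are skipped, and unknown names assert). The following is only a minimal standalone sketch of that lazily built name-to-libcall lookup; it uses plain C++ containers plus invented enum values, entry names, and helpers (Libcall, NameLibcalls, libcallFromName) in place of LLVM's ManagedStatic, StringMap, and the generated RTLIB data.

#include <cassert>
#include <cstdio>
#include <string>
#include <unordered_map>

// Invented stand-ins for RTLIB::Libcall and RuntimeLibcalls.def; the real
// patch generates the name/enum pairs with HANDLE_LIBCALL from the .def file.
enum Libcall { MEMCPY, SDIV_I32, UNKNOWN_LIBCALL };
struct NameEntry { const char *Name; Libcall LC; };
static const NameEntry NameLibcalls[] = {
    {"memcpy", MEMCPY},
    {"__divsi3", SDIV_I32},
};

// Build the name -> libcall map once, on first use, roughly what
// ManagedStatic<StaticLibcallNameMap> does in the patch. The real constructor
// additionally skips entries whose signature table slot is unsupported.
static const std::unordered_map<std::string, Libcall> &libcallNameMap() {
  static const std::unordered_map<std::string, Libcall> Map = [] {
    std::unordered_map<std::string, Libcall> M;
    for (const NameEntry &E : NameLibcalls)
      M.emplace(E.Name, E.LC);
    return M;
  }();
  return Map;
}

// Counterpart of the rewritten name-based GetSignature: one hash lookup
// instead of a strcmp loop over every RTLIB entry.
static Libcall libcallFromName(const std::string &Name) {
  auto It = libcallNameMap().find(Name);
  assert(It != libcallNameMap().end() && "unexpected runtime library name");
  return It->second;
}

int main() {
  std::printf("%d\n", libcallFromName("__divsi3") == SDIV_I32); // prints 1
}

The looked-up enum then indexes the signature table exactly as the RTLIB::Libcall-taking GetSignature overload already does, so the name-based overload becomes a thin wrapper over it.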