Diffstat (limited to 'contrib/llvm/lib/Target/WebAssembly')
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp | 70
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp | 258
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h | 60
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp | 206
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp | 68
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h | 31
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp | 83
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h | 39
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp | 152
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp | 140
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h | 185
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp | 264
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h | 112
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp | 102
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/README.txt | 168
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp | 36
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssembly.h | 59
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssembly.td | 73
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp | 95
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp | 286
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h | 77
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp | 277
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp | 372
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp | 133
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp | 378
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp | 1328
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp | 234
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp | 295
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp | 276
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h | 57
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def | 25
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp | 118
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp | 875
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h | 102
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td | 214
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td | 136
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td | 133
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td | 185
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td | 101
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td | 106
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp | 202
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h | 63
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td | 251
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td | 97
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td | 542
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td | 19
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp | 135
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp | 1179
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp | 191
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp | 235
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h | 46
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp | 62
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h | 121
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp | 105
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp | 76
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp | 210
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp | 124
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp | 174
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp | 107
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp | 894
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp | 148
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h | 52
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td | 62
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp | 99
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp | 1329
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h | 37
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp | 20
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h | 30
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp | 126
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp | 202
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp | 56
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h | 89
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp | 301
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h | 53
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp | 30
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h | 36
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp | 83
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h | 73
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp | 97
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h | 43
-rw-r--r-- contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt | 81
81 files changed, 15789 insertions, 0 deletions
diff --git a/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp b/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
new file mode 100644
index 000000000000..9be11da9afac
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/Disassembler/WebAssemblyDisassembler.cpp
@@ -0,0 +1,70 @@
+//==- WebAssemblyDisassembler.cpp - Disassembler for WebAssembly -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file is part of the WebAssembly Disassembler.
+///
+/// It contains code to translate the data produced by the decoder into
+/// MCInsts.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-disassembler"
+
+namespace {
+class WebAssemblyDisassembler final : public MCDisassembler {
+ std::unique_ptr<const MCInstrInfo> MCII;
+
+ DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &VStream,
+ raw_ostream &CStream) const override;
+
+public:
+ WebAssemblyDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
+ std::unique_ptr<const MCInstrInfo> MCII)
+ : MCDisassembler(STI, Ctx), MCII(std::move(MCII)) {}
+};
+} // end anonymous namespace
+
+static MCDisassembler *createWebAssemblyDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ std::unique_ptr<const MCInstrInfo> MCII(T.createMCInstrInfo());
+ return new WebAssemblyDisassembler(STI, Ctx, std::move(MCII));
+}
+
+extern "C" void LLVMInitializeWebAssemblyDisassembler() {
+ // Register the disassembler for each target.
+ TargetRegistry::RegisterMCDisassembler(getTheWebAssemblyTarget32(),
+ createWebAssemblyDisassembler);
+ TargetRegistry::RegisterMCDisassembler(getTheWebAssemblyTarget64(),
+ createWebAssemblyDisassembler);
+}
+
+MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction(
+ MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/,
+ raw_ostream &OS, raw_ostream &CS) const {
+
+ // TODO: Implement disassembly.
+
+ return MCDisassembler::Fail;
+}
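
The getInstruction stub above currently returns Fail unconditionally. Any eventual implementation will have to decode LEB128-encoded immediates, since that is how wasm serializes most operand values. Below is a minimal, self-contained sketch of the unsigned decoding loop; decodeULEB128Sketch is a hypothetical illustration helper, not part of this patch (LLVM itself ships an equivalent decodeULEB128 in llvm/Support/LEB128.h).

#include <cstddef>
#include <cstdint>

// Decode an unsigned LEB128 value starting at Bytes[Offset]. Advances
// Offset past the encoded bytes; returns false if the input is truncated.
static bool decodeULEB128Sketch(const uint8_t *Bytes, size_t Size,
                                size_t &Offset, uint64_t &Result) {
  Result = 0;
  unsigned Shift = 0;
  while (Offset < Size) {
    uint8_t Byte = Bytes[Offset++];
    Result |= uint64_t(Byte & 0x7f) << Shift;
    if ((Byte & 0x80) == 0) // high bit clear: this was the last byte
      return true;
    Shift += 7;
  }
  return false; // ran off the end of the buffer mid-value
}
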
diff --git a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
new file mode 100644
index 000000000000..c3f0f2787146
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp
@@ -0,0 +1,258 @@
+//=- WebAssemblyInstPrinter.cpp - WebAssembly assembly instruction printing -=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Print MCInst instructions to wasm format.
+///
+//===----------------------------------------------------------------------===//
+
+#include "InstPrinter/WebAssemblyInstPrinter.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+#include "WebAssemblyGenAsmWriter.inc"
+
+WebAssemblyInstPrinter::WebAssemblyInstPrinter(const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI), ControlFlowCounter(0) {}
+
+void WebAssemblyInstPrinter::printRegName(raw_ostream &OS,
+ unsigned RegNo) const {
+ assert(RegNo != WebAssemblyFunctionInfo::UnusedReg);
+ // Note that there's an implicit get_local/set_local here!
+ OS << "$" << RegNo;
+}
+
+void WebAssemblyInstPrinter::printInst(const MCInst *MI, raw_ostream &OS,
+ StringRef Annot,
+ const MCSubtargetInfo & /*STI*/) {
+ // Print the instruction (this uses the AsmStrings from the .td files).
+ printInstruction(MI, OS);
+
+ // Print any additional variadic operands.
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ if (Desc.isVariadic())
+ for (auto i = Desc.getNumOperands(), e = MI->getNumOperands(); i < e; ++i) {
+ // FIXME: For CALL_INDIRECT_VOID, don't print a leading comma, because
+ // we have an extra flags operand which is not currently printed, for
+ // compatibility reasons.
+ if (i != 0 &&
+ (MI->getOpcode() != WebAssembly::CALL_INDIRECT_VOID ||
+ i != Desc.getNumOperands()))
+ OS << ", ";
+ printOperand(MI, i, OS);
+ }
+
+ // Print any added annotation.
+ printAnnotation(OS, Annot);
+
+ if (CommentStream) {
+ // Observe any effects on the control flow stack, for use in annotating
+ // control flow label references.
+ switch (MI->getOpcode()) {
+ default:
+ break;
+ case WebAssembly::LOOP: {
+ printAnnotation(OS, "label" + utostr(ControlFlowCounter) + ':');
+ ControlFlowStack.push_back(std::make_pair(ControlFlowCounter++, true));
+ break;
+ }
+ case WebAssembly::BLOCK:
+ ControlFlowStack.push_back(std::make_pair(ControlFlowCounter++, false));
+ break;
+ case WebAssembly::END_LOOP:
+ ControlFlowStack.pop_back();
+ break;
+ case WebAssembly::END_BLOCK:
+ printAnnotation(
+ OS, "label" + utostr(ControlFlowStack.pop_back_val().first) + ':');
+ break;
+ }
+
+ // Annotate any control flow label references.
+ unsigned NumFixedOperands = Desc.NumOperands;
+ SmallSet<uint64_t, 8> Printed;
+ for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
+ if (!(i < NumFixedOperands
+ ? (Desc.OpInfo[i].OperandType ==
+ WebAssembly::OPERAND_BASIC_BLOCK)
+ : (Desc.TSFlags & WebAssemblyII::VariableOpImmediateIsLabel)))
+ continue;
+ uint64_t Depth = MI->getOperand(i).getImm();
+ if (!Printed.insert(Depth).second)
+ continue;
+ const auto &Pair = ControlFlowStack.rbegin()[Depth];
+ printAnnotation(OS, utostr(Depth) + ": " + (Pair.second ? "up" : "down") +
+ " to label" + utostr(Pair.first));
+ }
+ }
+}
+
+static std::string toString(const APFloat &FP) {
+ // Print NaNs with custom payloads specially.
+ if (FP.isNaN() &&
+ !FP.bitwiseIsEqual(APFloat::getQNaN(FP.getSemantics())) &&
+ !FP.bitwiseIsEqual(
+ APFloat::getQNaN(FP.getSemantics(), /*Negative=*/true))) {
+ APInt AI = FP.bitcastToAPInt();
+ return
+ std::string(AI.isNegative() ? "-" : "") + "nan:0x" +
+ utohexstr(AI.getZExtValue() &
+ (AI.getBitWidth() == 32 ? INT64_C(0x007fffff) :
+ INT64_C(0x000fffffffffffff)),
+ /*LowerCase=*/true);
+ }
+
+ // Use C99's hexadecimal floating-point representation.
+ static const size_t BufBytes = 128;
+ char buf[BufBytes];
+ auto Written = FP.convertToHexString(
+ buf, /*hexDigits=*/0, /*upperCase=*/false, APFloat::rmNearestTiesToEven);
+ (void)Written;
+ assert(Written != 0);
+ assert(Written < BufBytes);
+ return buf;
+}
+
+void WebAssemblyInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O) {
+ const MCOperand &Op = MI->getOperand(OpNo);
+ if (Op.isReg()) {
+ assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() ||
+ MII.get(MI->getOpcode()).TSFlags == 0) &&
+ "WebAssembly variable_ops register ops don't use TSFlags");
+ unsigned WAReg = Op.getReg();
+ if (int(WAReg) >= 0)
+ printRegName(O, WAReg);
+ else if (OpNo >= MII.get(MI->getOpcode()).getNumDefs())
+ O << "$pop" << WebAssemblyFunctionInfo::getWARegStackId(WAReg);
+ else if (WAReg != WebAssemblyFunctionInfo::UnusedReg)
+ O << "$push" << WebAssemblyFunctionInfo::getWARegStackId(WAReg);
+ else
+ O << "$drop";
+ // Add a '=' suffix if this is a def.
+ if (OpNo < MII.get(MI->getOpcode()).getNumDefs())
+ O << '=';
+ } else if (Op.isImm()) {
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ assert((OpNo < Desc.getNumOperands() ||
+ (Desc.TSFlags & WebAssemblyII::VariableOpIsImmediate)) &&
+ "WebAssemblyII::VariableOpIsImmediate should be set for "
+ "variable_ops immediate ops");
+ (void)Desc;
+ // TODO: (MII.get(MI->getOpcode()).TSFlags &
+ // WebAssemblyII::VariableOpImmediateIsLabel)
+ // can tell us whether this is an immediate referencing a label in the
+ // control flow stack, and it may be nice to pretty-print.
+ O << Op.getImm();
+ } else if (Op.isFPImm()) {
+ const MCInstrDesc &Desc = MII.get(MI->getOpcode());
+ assert(OpNo < Desc.getNumOperands() &&
+ "Unexpected floating-point immediate as a non-fixed operand");
+ assert(Desc.TSFlags == 0 &&
+ "WebAssembly variable_ops floating point ops don't use TSFlags");
+ const MCOperandInfo &Info = Desc.OpInfo[OpNo];
+ if (Info.OperandType == WebAssembly::OPERAND_F32IMM) {
+ // TODO: MC converts all floating point immediate operands to double.
+ // This is fine for numeric values, but may cause NaNs to change bits.
+ O << toString(APFloat(float(Op.getFPImm())));
+ } else {
+ assert(Info.OperandType == WebAssembly::OPERAND_F64IMM);
+ O << toString(APFloat(Op.getFPImm()));
+ }
+ } else {
+ assert((OpNo < MII.get(MI->getOpcode()).getNumOperands() ||
+ (MII.get(MI->getOpcode()).TSFlags &
+ WebAssemblyII::VariableOpIsImmediate)) &&
+ "WebAssemblyII::VariableOpIsImmediate should be set for "
+ "variable_ops expr ops");
+ assert(Op.isExpr() && "unknown operand kind in printOperand");
+ Op.getExpr()->print(O, &MAI);
+ }
+}
+
+void
+WebAssemblyInstPrinter::printWebAssemblyP2AlignOperand(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(OpNo).getImm();
+ if (Imm == WebAssembly::GetDefaultP2Align(MI->getOpcode()))
+ return;
+ O << ":p2align=" << Imm;
+}
+
+void
+WebAssemblyInstPrinter::printWebAssemblySignatureOperand(const MCInst *MI,
+ unsigned OpNo,
+ raw_ostream &O) {
+ int64_t Imm = MI->getOperand(OpNo).getImm();
+ switch (WebAssembly::ExprType(Imm)) {
+ case WebAssembly::ExprType::Void: break;
+ case WebAssembly::ExprType::I32: O << "i32"; break;
+ case WebAssembly::ExprType::I64: O << "i64"; break;
+ case WebAssembly::ExprType::F32: O << "f32"; break;
+ case WebAssembly::ExprType::F64: O << "f64"; break;
+ case WebAssembly::ExprType::I8x16: O << "i8x16"; break;
+ case WebAssembly::ExprType::I16x8: O << "i16x8"; break;
+ case WebAssembly::ExprType::I32x4: O << "i32x4"; break;
+ case WebAssembly::ExprType::F32x4: O << "f32x4"; break;
+ case WebAssembly::ExprType::B8x16: O << "b8x16"; break;
+ case WebAssembly::ExprType::B16x8: O << "b16x8"; break;
+ case WebAssembly::ExprType::B32x4: O << "b32x4"; break;
+ }
+}
+
+const char *llvm::WebAssembly::TypeToString(MVT Ty) {
+ switch (Ty.SimpleTy) {
+ case MVT::i32:
+ return "i32";
+ case MVT::i64:
+ return "i64";
+ case MVT::f32:
+ return "f32";
+ case MVT::f64:
+ return "f64";
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v4f32:
+ return "v128";
+ default:
+ llvm_unreachable("unsupported type");
+ }
+}
+
+const char *llvm::WebAssembly::TypeToString(wasm::ValType Type) {
+ switch (Type) {
+ case wasm::ValType::I32:
+ return "i32";
+ case wasm::ValType::I64:
+ return "i64";
+ case wasm::ValType::F32:
+ return "f32";
+ case wasm::ValType::F64:
+ return "f64";
+ }
+ llvm_unreachable("unsupported type");
+}
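
For concreteness, the NaN special case in toString above prints a quiet NaN with a nonstandard payload as nan:0x<payload>, using the f32 mask 0x007fffff (23 mantissa bits) or the f64 mask 0x000fffffffffffff (52 mantissa bits). A sketch of the same computation with plain integer operations rather than APFloat, assuming the f32 bit pattern 0x7fc00001 as input:

#include <cstdint>
#include <cstdio>

int main() {
  // 0x7fc00001 is a quiet NaN whose payload differs from the default
  // quiet NaN 0x7fc00000, so the printer takes the custom-payload path.
  uint32_t Bits = 0x7fc00001;
  uint64_t Payload = Bits & UINT64_C(0x007fffff); // f32 mantissa mask
  bool Negative = (Bits >> 31) != 0;
  // Prints "nan:0x400001", matching the printer's output for this pattern.
  std::printf("%snan:0x%llx\n", Negative ? "-" : "",
              (unsigned long long)Payload);
}
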
diff --git a/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
new file mode 100644
index 000000000000..b1de84d7e8e6
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.h
@@ -0,0 +1,60 @@
+// WebAssemblyInstPrinter.h - Print wasm MCInst to assembly syntax -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This class prints a WebAssembly MCInst to wasm file syntax.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_INSTPRINTER_WEBASSEMBLYINSTPRINTER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_INSTPRINTER_WEBASSEMBLYINSTPRINTER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+
+class MCSubtargetInfo;
+
+class WebAssemblyInstPrinter final : public MCInstPrinter {
+ uint64_t ControlFlowCounter;
+ SmallVector<std::pair<uint64_t, bool>, 0> ControlFlowStack;
+
+public:
+ WebAssemblyInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI);
+
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
+
+ // Used by tblgen code.
+ void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
+ void printWebAssemblyP2AlignOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O);
+ void printWebAssemblySignatureOperand(const MCInst *MI, unsigned OpNo,
+ raw_ostream &O);
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+};
+
+namespace WebAssembly {
+
+const char *TypeToString(MVT Ty);
+const char *TypeToString(wasm::ValType Type);
+
+} // end namespace WebAssembly
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
new file mode 100644
index 000000000000..226a3b35f2cf
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyAsmBackend.cpp
@@ -0,0 +1,206 @@
+//===-- WebAssemblyAsmBackend.cpp - WebAssembly Assembler Backend ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the WebAssemblyAsmBackend class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyFixupKinds.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCWasmObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+class WebAssemblyAsmBackendELF final : public MCAsmBackend {
+ bool Is64Bit;
+
+public:
+ explicit WebAssemblyAsmBackendELF(bool Is64Bit)
+ : MCAsmBackend(), Is64Bit(Is64Bit) {}
+ ~WebAssemblyAsmBackendELF() override {}
+
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsPCRel) const override;
+
+ std::unique_ptr<MCObjectWriter>
+ createObjectWriter(raw_pwrite_stream &OS) const override;
+
+ // No instruction requires relaxation
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ return false;
+ }
+
+ unsigned getNumFixupKinds() const override {
+ // We currently just use the generic fixups in MCFixup.h and don't have any
+ // target-specific fixups.
+ return 0;
+ }
+
+ bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
+
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {}
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+};
+
+class WebAssemblyAsmBackend final : public MCAsmBackend {
+ bool Is64Bit;
+
+public:
+ explicit WebAssemblyAsmBackend(bool Is64Bit)
+ : MCAsmBackend(), Is64Bit(Is64Bit) {}
+ ~WebAssemblyAsmBackend() override {}
+
+ unsigned getNumFixupKinds() const override {
+ return WebAssembly::NumTargetFixupKinds;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
+
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsPCRel) const override;
+
+ std::unique_ptr<MCObjectWriter>
+ createObjectWriter(raw_pwrite_stream &OS) const override;
+
+ // No instruction requires relaxation
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ return false;
+ }
+
+ bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
+
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {}
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
+};
+
+bool WebAssemblyAsmBackendELF::writeNopData(uint64_t Count,
+ MCObjectWriter *OW) const {
+ for (uint64_t i = 0; i < Count; ++i)
+ OW->write8(WebAssembly::Nop);
+
+ return true;
+}
+
+void WebAssemblyAsmBackendELF::applyFixup(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue &Target,
+ MutableArrayRef<char> Data,
+ uint64_t Value, bool IsPCRel) const {
+ const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind());
+ assert(Info.Flags == 0 && "WebAssembly does not use MCFixupKindInfo flags");
+
+ unsigned NumBytes = alignTo(Info.TargetSize, 8) / 8;
+ if (Value == 0)
+ return; // Doesn't change encoding.
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+}
+
+std::unique_ptr<MCObjectWriter>
+WebAssemblyAsmBackendELF::createObjectWriter(raw_pwrite_stream &OS) const {
+ return createWebAssemblyELFObjectWriter(OS, Is64Bit, 0);
+}
+
+const MCFixupKindInfo &
+WebAssemblyAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
+ const static MCFixupKindInfo Infos[WebAssembly::NumTargetFixupKinds] = {
+ // This table *must* be in the order that the fixup_* kinds are defined in
+ // WebAssemblyFixupKinds.h.
+ //
+ // Name Offset (bits) Size (bits) Flags
+ { "fixup_code_sleb128_i32", 0, 5*8, 0 },
+ { "fixup_code_sleb128_i64", 0, 10*8, 0 },
+ { "fixup_code_uleb128_i32", 0, 5*8, 0 },
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ return Infos[Kind - FirstTargetFixupKind];
+}
+
+bool WebAssemblyAsmBackend::writeNopData(uint64_t Count,
+ MCObjectWriter *OW) const {
+ if (Count == 0)
+ return true;
+
+ for (uint64_t i = 0; i < Count; ++i)
+ OW->write8(WebAssembly::Nop);
+
+ return true;
+}
+
+void WebAssemblyAsmBackend::applyFixup(const MCAssembler &Asm,
+ const MCFixup &Fixup,
+ const MCValue &Target,
+ MutableArrayRef<char> Data,
+ uint64_t Value, bool IsPCRel) const {
+ const MCFixupKindInfo &Info = getFixupKindInfo(Fixup.getKind());
+ assert(Info.Flags == 0 && "WebAssembly does not use MCFixupKindInfo flags");
+
+ unsigned NumBytes = alignTo(Info.TargetSize, 8) / 8;
+ if (Value == 0)
+ return; // Doesn't change encoding.
+
+ // Shift the value into position.
+ Value <<= Info.TargetOffset;
+
+ unsigned Offset = Fixup.getOffset();
+ assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
+
+ // For each byte of the fragment that the fixup touches, mask in the
+ // bits from the fixup value.
+ for (unsigned i = 0; i != NumBytes; ++i)
+ Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
+}
+
+std::unique_ptr<MCObjectWriter>
+WebAssemblyAsmBackend::createObjectWriter(raw_pwrite_stream &OS) const {
+ return createWebAssemblyWasmObjectWriter(OS, Is64Bit);
+}
+} // end anonymous namespace
+
+MCAsmBackend *llvm::createWebAssemblyAsmBackend(const Triple &TT) {
+ if (TT.isOSBinFormatELF())
+ return new WebAssemblyAsmBackendELF(TT.isArch64Bit());
+ return new WebAssemblyAsmBackend(TT.isArch64Bit());
+}
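
Both applyFixup implementations above patch the fixup value into the fragment with the same little-endian OR loop. The loop in isolation, as a standalone sketch (patchFixup is an illustrative name, not an LLVM API):

#include <cassert>
#include <cstddef>
#include <cstdint>

// OR Value into NumBytes bytes of Data at Offset, least significant byte
// first, exactly as the applyFixup loop does after shifting the value by
// the fixup's TargetOffset.
static void patchFixup(uint8_t *Data, size_t DataSize, unsigned Offset,
                       unsigned NumBytes, uint64_t Value) {
  if (Value == 0)
    return; // doesn't change the encoding
  assert(Offset + NumBytes <= DataSize && "invalid fixup offset");
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
}

Note that the loop only ever sets bits; it relies on the emitter having reserved the fixup site with known contents beforehand.
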
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp
new file mode 100644
index 000000000000..b67ecfa455b3
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyELFObjectWriter.cpp
@@ -0,0 +1,68 @@
+//===-- WebAssemblyELFObjectWriter.cpp - WebAssembly ELF Writer -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file handles ELF-specific object emission, converting LLVM's
+/// internal fixups into the appropriate relocations.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace llvm;
+
+namespace {
+class WebAssemblyELFObjectWriter final : public MCELFObjectTargetWriter {
+public:
+ WebAssemblyELFObjectWriter(bool Is64Bit, uint8_t OSABI);
+
+protected:
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
+};
+} // end anonymous namespace
+
+WebAssemblyELFObjectWriter::WebAssemblyELFObjectWriter(bool Is64Bit,
+ uint8_t OSABI)
+ : MCELFObjectTargetWriter(Is64Bit, OSABI, ELF::EM_WEBASSEMBLY,
+ /*HasRelocationAddend=*/false) {}
+
+unsigned WebAssemblyELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+ // WebAssembly functions are not allocated in the address space. To resolve a
+ // pointer to a function, we must use a special relocation type.
+ if (const MCSymbolRefExpr *SyExp =
+ dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
+ if (SyExp->getKind() == MCSymbolRefExpr::VK_WebAssembly_FUNCTION)
+ return ELF::R_WEBASSEMBLY_FUNCTION;
+
+ switch (Fixup.getKind()) {
+ case FK_Data_4:
+ assert(!is64Bit() && "4-byte relocations only supported on wasm32");
+ return ELF::R_WEBASSEMBLY_DATA;
+ case FK_Data_8:
+ assert(is64Bit() && "8-byte relocations only supported on wasm64");
+ return ELF::R_WEBASSEMBLY_DATA;
+ default:
+ llvm_unreachable("unimplemented fixup kind");
+ }
+}
+
+std::unique_ptr<MCObjectWriter>
+llvm::createWebAssemblyELFObjectWriter(raw_pwrite_stream &OS,
+ bool Is64Bit,
+ uint8_t OSABI) {
+ auto MOTW = llvm::make_unique<WebAssemblyELFObjectWriter>(Is64Bit, OSABI);
+ return createELFObjectWriter(std::move(MOTW), OS, /*IsLittleEndian=*/true);
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
new file mode 100644
index 000000000000..b0af63c924bd
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyFixupKinds.h
@@ -0,0 +1,31 @@
+//=- WebAssemblyFixupKinds.h - WebAssembly Specific Fixup Entries -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYFIXUPKINDS_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+namespace WebAssembly {
+enum Fixups {
+ fixup_code_sleb128_i32 = FirstTargetFixupKind, // 32-bit signed
+ fixup_code_sleb128_i64, // 64-bit signed
+ fixup_code_uleb128_i32, // 32-bit unsigned
+
+ fixup_code_global_index, // 32-bit unsigned
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+};
+} // end namespace WebAssembly
+} // end namespace llvm
+
+#endif
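
The padded widths that WebAssemblyAsmBackend::getFixupKindInfo assigns to these kinds earlier in this change (5*8 bits for the i32 kinds, 10*8 for sleb128_i64) are the maximum LEB128 lengths: an N-bit integer needs at most ceil(N/7) LEB128 bytes. A compile-time check of that arithmetic, as a sketch:

// Maximum LEB128 bytes needed for an N-bit integer: ceil(N / 7).
constexpr unsigned maxLEB128Bytes(unsigned Bits) { return (Bits + 6) / 7; }
static_assert(maxLEB128Bytes(32) == 5, "i32 fixup kinds reserve 5 bytes");
static_assert(maxLEB128Bytes(64) == 10, "sleb128_i64 reserves 10 bytes");
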
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
new file mode 100644
index 000000000000..5f8c78ed1683
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.cpp
@@ -0,0 +1,83 @@
+//===-- WebAssemblyMCAsmInfo.cpp - WebAssembly asm properties -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the declarations of the WebAssemblyMCAsmInfo
+/// properties.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMCAsmInfo.h"
+#include "llvm/ADT/Triple.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mc-asm-info"
+
+WebAssemblyMCAsmInfoELF::~WebAssemblyMCAsmInfoELF() {}
+
+WebAssemblyMCAsmInfoELF::WebAssemblyMCAsmInfoELF(const Triple &T) {
+ CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
+
+ // TODO: What should MaxInstLength be?
+
+ UseDataRegionDirectives = true;
+
+ // Use .skip instead of .zero because .zero is confusing when used with two
+ // arguments (it doesn't actually zero things out).
+ ZeroDirective = "\t.skip\t";
+
+ Data8bitsDirective = "\t.int8\t";
+ Data16bitsDirective = "\t.int16\t";
+ Data32bitsDirective = "\t.int32\t";
+ Data64bitsDirective = "\t.int64\t";
+
+ AlignmentIsInBytes = false;
+ COMMDirectiveAlignmentIsInBytes = false;
+ LCOMMDirectiveAlignmentType = LCOMM::Log2Alignment;
+
+ SupportsDebugInformation = true;
+
+ // For now, WebAssembly does not support exceptions.
+ ExceptionsType = ExceptionHandling::None;
+
+ // TODO: UseIntegratedAssembler?
+
+ // WebAssembly's stack is never executable.
+ UsesNonexecutableStackSection = false;
+}
+
+WebAssemblyMCAsmInfo::~WebAssemblyMCAsmInfo() {}
+
+WebAssemblyMCAsmInfo::WebAssemblyMCAsmInfo(const Triple &T) {
+ CodePointerSize = CalleeSaveStackSlotSize = T.isArch64Bit() ? 8 : 4;
+
+ // TODO: What should MaxInstLength be?
+
+ UseDataRegionDirectives = true;
+
+ // Use .skip instead of .zero because .zero is confusing when used with two
+ // arguments (it doesn't actually zero things out).
+ ZeroDirective = "\t.skip\t";
+
+ Data8bitsDirective = "\t.int8\t";
+ Data16bitsDirective = "\t.int16\t";
+ Data32bitsDirective = "\t.int32\t";
+ Data64bitsDirective = "\t.int64\t";
+
+ AlignmentIsInBytes = false;
+ COMMDirectiveAlignmentIsInBytes = false;
+ LCOMMDirectiveAlignmentType = LCOMM::Log2Alignment;
+
+ SupportsDebugInformation = true;
+
+ // For now, WebAssembly does not support exceptions.
+ ExceptionsType = ExceptionHandling::None;
+
+ // TODO: UseIntegratedAssembler?
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h
new file mode 100644
index 000000000000..d9547096190e
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCAsmInfo.h
@@ -0,0 +1,39 @@
+//===-- WebAssemblyMCAsmInfo.h - WebAssembly asm properties -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the declaration of the WebAssemblyMCAsmInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCASMINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCASMINFO_H
+
+#include "llvm/MC/MCAsmInfoELF.h"
+#include "llvm/MC/MCAsmInfoWasm.h"
+
+namespace llvm {
+
+class Triple;
+
+class WebAssemblyMCAsmInfoELF final : public MCAsmInfoELF {
+public:
+ explicit WebAssemblyMCAsmInfoELF(const Triple &T);
+ ~WebAssemblyMCAsmInfoELF() override;
+};
+
+class WebAssemblyMCAsmInfo final : public MCAsmInfoWasm {
+public:
+ explicit WebAssemblyMCAsmInfo(const Triple &T);
+ ~WebAssemblyMCAsmInfo() override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
new file mode 100644
index 000000000000..77744e53d62f
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp
@@ -0,0 +1,152 @@
+//=- WebAssemblyMCCodeEmitter.cpp - Convert WebAssembly code to machine code -//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the WebAssemblyMCCodeEmitter class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyFixupKinds.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
+STATISTIC(MCNumFixups, "Number of MC fixups created.");
+
+namespace {
+class WebAssemblyMCCodeEmitter final : public MCCodeEmitter {
+ const MCInstrInfo &MCII;
+
+ // Implementation generated by tablegen.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ void encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+public:
+ WebAssemblyMCCodeEmitter(const MCInstrInfo &mcii) : MCII(mcii) {}
+};
+} // end anonymous namespace
+
+MCCodeEmitter *llvm::createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII) {
+ return new WebAssemblyMCCodeEmitter(MCII);
+}
+
+void WebAssemblyMCCodeEmitter::encodeInstruction(
+ const MCInst &MI, raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ uint64_t Start = OS.tell();
+
+ uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
+ if (Binary <= UINT8_MAX) {
+ OS << uint8_t(Binary);
+ } else {
+ assert(Binary <= UINT16_MAX && "Several-byte opcodes not supported yet");
+ OS << uint8_t(Binary >> 8)
+ << uint8_t(Binary);
+ }
+
+ // For br_table instructions, encode the size of the table. In the MCInst,
+ // there's an index operand, one operand for each table entry, and the
+ // default operand.
+ if (MI.getOpcode() == WebAssembly::BR_TABLE_I32 ||
+ MI.getOpcode() == WebAssembly::BR_TABLE_I64)
+ encodeULEB128(MI.getNumOperands() - 2, OS);
+
+ const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+ for (unsigned i = 0, e = MI.getNumOperands(); i < e; ++i) {
+ const MCOperand &MO = MI.getOperand(i);
+ if (MO.isReg()) {
+ /* nothing to encode */
+ } else if (MO.isImm()) {
+ if (i < Desc.getNumOperands()) {
+ assert(Desc.TSFlags == 0 &&
+ "WebAssembly non-variable_ops don't use TSFlags");
+ const MCOperandInfo &Info = Desc.OpInfo[i];
+ if (Info.OperandType == WebAssembly::OPERAND_I32IMM) {
+ encodeSLEB128(int32_t(MO.getImm()), OS);
+ } else if (Info.OperandType == WebAssembly::OPERAND_I64IMM) {
+ encodeSLEB128(int64_t(MO.getImm()), OS);
+ } else if (Info.OperandType == WebAssembly::OPERAND_GLOBAL) {
+ llvm_unreachable("wasm globals should only be accessed symbolicly");
+ } else if (Info.OperandType == WebAssembly::OPERAND_SIGNATURE) {
+ encodeSLEB128(int64_t(MO.getImm()), OS);
+ } else {
+ encodeULEB128(uint64_t(MO.getImm()), OS);
+ }
+ } else {
+ assert(Desc.TSFlags == (WebAssemblyII::VariableOpIsImmediate |
+ WebAssemblyII::VariableOpImmediateIsLabel));
+ encodeULEB128(uint64_t(MO.getImm()), OS);
+ }
+ } else if (MO.isFPImm()) {
+ assert(i < Desc.getNumOperands() &&
+ "Unexpected floating-point immediate as a non-fixed operand");
+ assert(Desc.TSFlags == 0 &&
+ "WebAssembly variable_ops floating point ops don't use TSFlags");
+ const MCOperandInfo &Info = Desc.OpInfo[i];
+ if (Info.OperandType == WebAssembly::OPERAND_F32IMM) {
+ // TODO: MC converts all floating point immediate operands to double.
+ // This is fine for numeric values, but may cause NaNs to change bits.
+ float f = float(MO.getFPImm());
+ support::endian::Writer<support::little>(OS).write<float>(f);
+ } else {
+ assert(Info.OperandType == WebAssembly::OPERAND_F64IMM);
+ double d = MO.getFPImm();
+ support::endian::Writer<support::little>(OS).write<double>(d);
+ }
+ } else if (MO.isExpr()) {
+ const MCOperandInfo &Info = Desc.OpInfo[i];
+ llvm::MCFixupKind FixupKind;
+ size_t PaddedSize = 5;
+ if (Info.OperandType == WebAssembly::OPERAND_I32IMM) {
+ FixupKind = MCFixupKind(WebAssembly::fixup_code_sleb128_i32);
+ } else if (Info.OperandType == WebAssembly::OPERAND_I64IMM) {
+ FixupKind = MCFixupKind(WebAssembly::fixup_code_sleb128_i64);
+ PaddedSize = 10;
+ } else if (Info.OperandType == WebAssembly::OPERAND_FUNCTION32 ||
+ Info.OperandType == WebAssembly::OPERAND_OFFSET32 ||
+ Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
+ FixupKind = MCFixupKind(WebAssembly::fixup_code_uleb128_i32);
+ } else if (Info.OperandType == WebAssembly::OPERAND_GLOBAL) {
+ FixupKind = MCFixupKind(WebAssembly::fixup_code_global_index);
+ } else {
+ llvm_unreachable("unexpected symbolic operand kind");
+ }
+ Fixups.push_back(MCFixup::create(
+ OS.tell() - Start, MO.getExpr(),
+ FixupKind, MI.getLoc()));
+ ++MCNumFixups;
+ encodeULEB128(0, OS, PaddedSize);
+ } else {
+ llvm_unreachable("unexpected operand kind");
+ }
+ }
+
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
+#include "WebAssemblyGenMCCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
new file mode 100644
index 000000000000..e7c8809de70e
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.cpp
@@ -0,0 +1,140 @@
+//===-- WebAssemblyMCTargetDesc.cpp - WebAssembly Target Descriptions -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file provides WebAssembly-specific target descriptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMCTargetDesc.h"
+#include "InstPrinter/WebAssemblyInstPrinter.h"
+#include "WebAssemblyMCAsmInfo.h"
+#include "WebAssemblyTargetStreamer.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-mc-target-desc"
+
+#define GET_INSTRINFO_MC_DESC
+#include "WebAssemblyGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "WebAssemblyGenRegisterInfo.inc"
+
+static MCAsmInfo *createMCAsmInfo(const MCRegisterInfo & /*MRI*/,
+ const Triple &TT) {
+ if (TT.isOSBinFormatELF())
+ return new WebAssemblyMCAsmInfoELF(TT);
+ return new WebAssemblyMCAsmInfo(TT);
+}
+
+static MCInstrInfo *createMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitWebAssemblyMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createMCRegisterInfo(const Triple & /*T*/) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitWebAssemblyMCRegisterInfo(X, 0);
+ return X;
+}
+
+static MCInstPrinter *createMCInstPrinter(const Triple & /*T*/,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) {
+ assert(SyntaxVariant == 0 && "WebAssembly only has one syntax variant");
+ return new WebAssemblyInstPrinter(MAI, MII, MRI);
+}
+
+static MCCodeEmitter *createCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo & /*MRI*/,
+ MCContext &Ctx) {
+ return createWebAssemblyMCCodeEmitter(MCII);
+}
+
+static MCAsmBackend *createAsmBackend(const Target & /*T*/,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo & /*MRI*/,
+ const MCTargetOptions & /*Options*/) {
+ return createWebAssemblyAsmBackend(STI.getTargetTriple());
+}
+
+static MCSubtargetInfo *createMCSubtargetInfo(const Triple &TT, StringRef CPU,
+ StringRef FS) {
+ return createWebAssemblyMCSubtargetInfoImpl(TT, CPU, FS);
+}
+
+static MCTargetStreamer *
+createObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
+ const Triple &TT = STI.getTargetTriple();
+ if (TT.isOSBinFormatELF())
+ return new WebAssemblyTargetELFStreamer(S);
+
+ return new WebAssemblyTargetWasmStreamer(S);
+}
+
+static MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter * /*InstPrint*/,
+ bool /*isVerboseAsm*/) {
+ return new WebAssemblyTargetAsmStreamer(S, OS);
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeWebAssemblyTargetMC() {
+ for (Target *T :
+ {&getTheWebAssemblyTarget32(), &getTheWebAssemblyTarget64()}) {
+ // Register the MC asm info.
+ RegisterMCAsmInfoFn X(*T, createMCAsmInfo);
+
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(*T, createMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(*T, createMCRegisterInfo);
+
+ // Register the MCInstPrinter.
+ TargetRegistry::RegisterMCInstPrinter(*T, createMCInstPrinter);
+
+ // Register the MC code emitter.
+ TargetRegistry::RegisterMCCodeEmitter(*T, createCodeEmitter);
+
+ // Register the ASM Backend.
+ TargetRegistry::RegisterMCAsmBackend(*T, createAsmBackend);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(*T, createMCSubtargetInfo);
+
+ // Register the object target streamer.
+ TargetRegistry::RegisterObjectTargetStreamer(*T,
+ createObjectTargetStreamer);
+ // Register the asm target streamer.
+ TargetRegistry::RegisterAsmTargetStreamer(*T, createAsmTargetStreamer);
+ }
+}
+
+wasm::ValType WebAssembly::toValType(const MVT &Ty) {
+ switch (Ty.SimpleTy) {
+ case MVT::i32: return wasm::ValType::I32;
+ case MVT::i64: return wasm::ValType::I64;
+ case MVT::f32: return wasm::ValType::F32;
+ case MVT::f64: return wasm::ValType::F64;
+ default: llvm_unreachable("unexpected type");
+ }
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
new file mode 100644
index 000000000000..7dca89ab822d
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -0,0 +1,185 @@
+//==- WebAssemblyMCTargetDesc.h - WebAssembly Target Descriptions -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file provides WebAssembly-specific target descriptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTARGETDESC_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYMCTARGETDESC_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Support/DataTypes.h"
+#include <memory>
+
+namespace llvm {
+
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
+class MCSubtargetInfo;
+class MVT;
+class Target;
+class Triple;
+class raw_pwrite_stream;
+
+Target &getTheWebAssemblyTarget32();
+Target &getTheWebAssemblyTarget64();
+
+MCCodeEmitter *createWebAssemblyMCCodeEmitter(const MCInstrInfo &MCII);
+
+MCAsmBackend *createWebAssemblyAsmBackend(const Triple &TT);
+
+std::unique_ptr<MCObjectWriter>
+createWebAssemblyELFObjectWriter(raw_pwrite_stream &OS,
+ bool Is64Bit, uint8_t OSABI);
+
+std::unique_ptr<MCObjectWriter>
+createWebAssemblyWasmObjectWriter(raw_pwrite_stream &OS,
+ bool Is64Bit);
+
+namespace WebAssembly {
+enum OperandType {
+ /// Basic block label in a branch construct.
+ OPERAND_BASIC_BLOCK = MCOI::OPERAND_FIRST_TARGET,
+ /// Local index.
+ OPERAND_LOCAL,
+ /// Global index.
+ OPERAND_GLOBAL,
+ /// 32-bit integer immediates.
+ OPERAND_I32IMM,
+ /// 64-bit integer immediates.
+ OPERAND_I64IMM,
+ /// 32-bit floating-point immediates.
+ OPERAND_F32IMM,
+ /// 64-bit floating-point immediates.
+ OPERAND_F64IMM,
+ /// 32-bit unsigned function indices.
+ OPERAND_FUNCTION32,
+ /// 32-bit unsigned memory offsets.
+ OPERAND_OFFSET32,
+ /// p2align immediate for load and store address alignment.
+ OPERAND_P2ALIGN,
+ /// signature immediate for block/loop.
+ OPERAND_SIGNATURE,
+ /// type signature immediate for call_indirect.
+ OPERAND_TYPEINDEX,
+};
+} // end namespace WebAssembly
+
+namespace WebAssemblyII {
+enum {
+ // For variadic instructions, this flag indicates whether an operand
+ // in the variable_ops range is an immediate value.
+ VariableOpIsImmediate = (1 << 0),
+ // For immediate values in the variable_ops range, this flag indicates
+ // whether the value represents a control-flow label.
+ VariableOpImmediateIsLabel = (1 << 1)
+};
+} // end namespace WebAssemblyII
+
+} // end namespace llvm
+
+// Defines symbolic names for WebAssembly registers. This defines a mapping from
+// register name to register number.
+//
+#define GET_REGINFO_ENUM
+#include "WebAssemblyGenRegisterInfo.inc"
+
+// Defines symbolic names for the WebAssembly instructions.
+//
+#define GET_INSTRINFO_ENUM
+#include "WebAssemblyGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+namespace llvm {
+namespace WebAssembly {
+
+/// Return the default p2align value for a load or store with the given opcode.
+inline unsigned GetDefaultP2Align(unsigned Opcode) {
+ switch (Opcode) {
+ case WebAssembly::LOAD8_S_I32:
+ case WebAssembly::LOAD8_U_I32:
+ case WebAssembly::LOAD8_S_I64:
+ case WebAssembly::LOAD8_U_I64:
+ case WebAssembly::ATOMIC_LOAD8_U_I32:
+ case WebAssembly::ATOMIC_LOAD8_U_I64:
+ case WebAssembly::STORE8_I32:
+ case WebAssembly::STORE8_I64:
+ return 0;
+ case WebAssembly::LOAD16_S_I32:
+ case WebAssembly::LOAD16_U_I32:
+ case WebAssembly::LOAD16_S_I64:
+ case WebAssembly::LOAD16_U_I64:
+ case WebAssembly::ATOMIC_LOAD16_U_I32:
+ case WebAssembly::ATOMIC_LOAD16_U_I64:
+ case WebAssembly::STORE16_I32:
+ case WebAssembly::STORE16_I64:
+ return 1;
+ case WebAssembly::LOAD_I32:
+ case WebAssembly::LOAD_F32:
+ case WebAssembly::STORE_I32:
+ case WebAssembly::STORE_F32:
+ case WebAssembly::LOAD32_S_I64:
+ case WebAssembly::LOAD32_U_I64:
+ case WebAssembly::STORE32_I64:
+ case WebAssembly::ATOMIC_LOAD_I32:
+ case WebAssembly::ATOMIC_LOAD32_U_I64:
+ return 2;
+ case WebAssembly::LOAD_I64:
+ case WebAssembly::LOAD_F64:
+ case WebAssembly::STORE_I64:
+ case WebAssembly::STORE_F64:
+ case WebAssembly::ATOMIC_LOAD_I64:
+ return 3;
+ default:
+ llvm_unreachable("Only loads and stores have p2align values");
+ }
+}
+
+/// The operand number of the load or store address in load/store instructions.
+static const unsigned LoadAddressOperandNo = 3;
+static const unsigned StoreAddressOperandNo = 2;
+
+/// The operand number of the load or store p2align in load/store instructions.
+static const unsigned LoadP2AlignOperandNo = 1;
+static const unsigned StoreP2AlignOperandNo = 0;
+
+/// This is used to indicate block signatures.
+enum class ExprType {
+ Void = -0x40,
+ I32 = -0x01,
+ I64 = -0x02,
+ F32 = -0x03,
+ F64 = -0x04,
+ I8x16 = -0x05,
+ I16x8 = -0x06,
+ I32x4 = -0x07,
+ F32x4 = -0x08,
+ B8x16 = -0x09,
+ B16x8 = -0x0a,
+ B32x4 = -0x0b
+};
+
+/// Instruction opcodes emitted via means other than CodeGen.
+static const unsigned Nop = 0x01;
+static const unsigned End = 0x0b;
+
+wasm::ValType toValType(const MVT &Ty);
+
+} // end namespace WebAssembly
+} // end namespace llvm
+
+#endif
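
GetDefaultP2Align above returns the log2 of the access width, so the natural byte alignment of a load or store is 1 << p2align; printWebAssemblyP2AlignOperand earlier in this change suppresses the :p2align= suffix exactly when an instruction uses this default. A small standalone illustration of the relation:

#include <cassert>

// Natural alignment in bytes for a given p2align exponent.
constexpr unsigned naturalAlign(unsigned P2Align) { return 1u << P2Align; }

int main() {
  assert(naturalAlign(0) == 1); // 8-bit accesses (LOAD8_*, STORE8_*)
  assert(naturalAlign(1) == 2); // 16-bit accesses
  assert(naturalAlign(2) == 4); // 32-bit accesses (LOAD_I32, STORE_F32, ...)
  assert(naturalAlign(3) == 8); // 64-bit accesses (LOAD_I64, STORE_F64, ...)
}
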
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
new file mode 100644
index 000000000000..0ca52ad651b5
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.cpp
@@ -0,0 +1,264 @@
+//==-- WebAssemblyTargetStreamer.cpp - WebAssembly Target Streamer Methods --=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines WebAssembly-specific target streamer classes.
+/// These are for implementing support for target-specific assembly directives.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetStreamer.h"
+#include "InstPrinter/WebAssemblyInstPrinter.h"
+#include "WebAssemblyMCTargetDesc.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionWasm.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
+using namespace llvm;
+
+WebAssemblyTargetStreamer::WebAssemblyTargetStreamer(MCStreamer &S)
+ : MCTargetStreamer(S) {}
+
+void WebAssemblyTargetStreamer::emitValueType(wasm::ValType Type) {
+ Streamer.EmitSLEB128IntValue(int32_t(Type));
+}
+
+WebAssemblyTargetAsmStreamer::WebAssemblyTargetAsmStreamer(
+ MCStreamer &S, formatted_raw_ostream &OS)
+ : WebAssemblyTargetStreamer(S), OS(OS) {}
+
+WebAssemblyTargetELFStreamer::WebAssemblyTargetELFStreamer(MCStreamer &S)
+ : WebAssemblyTargetStreamer(S) {}
+
+WebAssemblyTargetWasmStreamer::WebAssemblyTargetWasmStreamer(MCStreamer &S)
+ : WebAssemblyTargetStreamer(S) {}
+
+static void PrintTypes(formatted_raw_ostream &OS, ArrayRef<MVT> Types) {
+ bool First = true;
+ for (MVT Type : Types) {
+ if (First)
+ First = false;
+ else
+ OS << ", ";
+ OS << WebAssembly::TypeToString(Type);
+ }
+ OS << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitParam(MCSymbol *Symbol,
+ ArrayRef<MVT> Types) {
+ if (!Types.empty()) {
+ OS << "\t.param \t";
+
+ // FIXME: Currently this applies to the "current" function; it may
+ // be cleaner to specify an explicit symbol as part of the directive.
+
+ PrintTypes(OS, Types);
+ }
+}
+
+void WebAssemblyTargetAsmStreamer::emitResult(MCSymbol *Symbol,
+ ArrayRef<MVT> Types) {
+ if (!Types.empty()) {
+ OS << "\t.result \t";
+
+ // FIXME: Currently this applies to the "current" function; it may
+ // be cleaner to specify an explicit symbol as part of the directive.
+
+ PrintTypes(OS, Types);
+ }
+}
+
+void WebAssemblyTargetAsmStreamer::emitLocal(ArrayRef<MVT> Types) {
+ if (!Types.empty()) {
+ OS << "\t.local \t";
+ PrintTypes(OS, Types);
+ }
+}
+
+void WebAssemblyTargetAsmStreamer::emitGlobal(
+ ArrayRef<wasm::Global> Globals) {
+ if (!Globals.empty()) {
+ OS << "\t.globalvar \t";
+
+ bool First = true;
+ for (const wasm::Global &G : Globals) {
+ if (First)
+ First = false;
+ else
+ OS << ", ";
+ OS << WebAssembly::TypeToString(G.Type);
+ if (!G.InitialModule.empty())
+ OS << '=' << G.InitialModule << ':' << G.InitialName;
+ else
+ OS << '=' << G.InitialValue;
+ }
+ OS << '\n';
+ }
+}
+
+void WebAssemblyTargetAsmStreamer::emitEndFunc() { OS << "\t.endfunc\n"; }
+
+void WebAssemblyTargetAsmStreamer::emitIndirectFunctionType(
+ MCSymbol *Symbol, SmallVectorImpl<MVT> &Params, SmallVectorImpl<MVT> &Results) {
+ OS << "\t.functype\t" << Symbol->getName();
+ if (Results.empty())
+ OS << ", void";
+ else {
+ assert(Results.size() == 1);
+ OS << ", " << WebAssembly::TypeToString(Results.front());
+ }
+ for (auto Ty : Params)
+ OS << ", " << WebAssembly::TypeToString(Ty);
+ OS << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitGlobalImport(StringRef name) {
+ OS << "\t.import_global\t" << name << '\n';
+}
+
+void WebAssemblyTargetAsmStreamer::emitIndIdx(const MCExpr *Value) {
+ OS << "\t.indidx \t" << *Value << '\n';
+}
+
+void WebAssemblyTargetELFStreamer::emitParam(MCSymbol *Symbol,
+ ArrayRef<MVT> Types) {
+ // Nothing to emit; params are declared as part of the function signature.
+}
+
+void WebAssemblyTargetELFStreamer::emitResult(MCSymbol *Symbol,
+ ArrayRef<MVT> Types) {
+ // Nothing to emit; results are declared as part of the function signature.
+}
+
+void WebAssemblyTargetELFStreamer::emitLocal(ArrayRef<MVT> Types) {
+ Streamer.EmitULEB128IntValue(Types.size());
+ for (MVT Type : Types)
+ emitValueType(WebAssembly::toValType(Type));
+}
+
+void WebAssemblyTargetELFStreamer::emitGlobal(
+ ArrayRef<wasm::Global> Globals) {
+ llvm_unreachable(".globalvar encoding not yet implemented");
+}
+
+void WebAssemblyTargetELFStreamer::emitEndFunc() {
+ Streamer.EmitIntValue(WebAssembly::End, 1);
+}
+
+void WebAssemblyTargetELFStreamer::emitIndIdx(const MCExpr *Value) {
+ llvm_unreachable(".indidx encoding not yet implemented");
+}
+
+void WebAssemblyTargetELFStreamer::emitIndirectFunctionType(
+ MCSymbol *Symbol, SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) {
+ // Nothing to emit here. TODO: Re-design how linking works and re-evaluate
+ // whether it's necessary for .o files to declare indirect function types.
+}
+
+void WebAssemblyTargetELFStreamer::emitGlobalImport(StringRef name) {
+}
+
+void WebAssemblyTargetWasmStreamer::emitParam(MCSymbol *Symbol,
+ ArrayRef<MVT> Types) {
+ SmallVector<wasm::ValType, 4> Params;
+ for (MVT Ty : Types)
+ Params.push_back(WebAssembly::toValType(Ty));
+
+ cast<MCSymbolWasm>(Symbol)->setParams(std::move(Params));
+}
+
+void WebAssemblyTargetWasmStreamer::emitResult(MCSymbol *Symbol,
+ ArrayRef<MVT> Types) {
+ SmallVector<wasm::ValType, 4> Returns;
+ for (MVT Ty : Types)
+ Returns.push_back(WebAssembly::toValType(Ty));
+
+ cast<MCSymbolWasm>(Symbol)->setReturns(std::move(Returns));
+}
+
+void WebAssemblyTargetWasmStreamer::emitLocal(ArrayRef<MVT> Types) {
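+ // The wasm binary format encodes locals as (count, type) pairs, so group
+ // consecutive runs of the same type; e.g. [i32, i32, f64] is emitted as
+ // (2, i32), (1, f64).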
+ SmallVector<std::pair<MVT, uint32_t>, 4> Grouped;
+ for (MVT Type : Types) {
+ if (Grouped.empty() || Grouped.back().first != Type)
+ Grouped.push_back(std::make_pair(Type, 1));
+ else
+ ++Grouped.back().second;
+ }
+
+ Streamer.EmitULEB128IntValue(Grouped.size());
+ for (auto Pair : Grouped) {
+ Streamer.EmitULEB128IntValue(Pair.second);
+ emitValueType(WebAssembly::toValType(Pair.first));
+ }
+}
+
+void WebAssemblyTargetWasmStreamer::emitGlobal(
+ ArrayRef<wasm::Global> Globals) {
+ // Encode the globals used by the function into the special .global_variables
+ // section. This will later be decoded and turned into contents for the
+ // Globals Section.
+ Streamer.PushSection();
+ Streamer.SwitchSection(Streamer.getContext().getWasmSection(
+ ".global_variables", SectionKind::getMetadata()));
+ for (const wasm::Global &G : Globals) {
+ Streamer.EmitIntValue(int32_t(G.Type), 1);
+ Streamer.EmitIntValue(G.Mutable, 1);
+ if (G.InitialModule.empty()) {
+ Streamer.EmitIntValue(0, 1); // indicate that we have an int value
+ Streamer.EmitSLEB128IntValue(G.InitialValue);
+ } else {
+ Streamer.EmitIntValue(1, 1); // indicate that we have a module import
+ Streamer.EmitBytes(G.InitialModule);
+ Streamer.EmitIntValue(0, 1); // nul-terminate
+ Streamer.EmitBytes(G.InitialName);
+ Streamer.EmitIntValue(0, 1); // nul-terminate
+ }
+ }
+ Streamer.PopSection();
+}
+
+void WebAssemblyTargetWasmStreamer::emitEndFunc() {
+ llvm_unreachable(".end_func is not needed for direct wasm output");
+}
+
+void WebAssemblyTargetWasmStreamer::emitIndIdx(const MCExpr *Value) {
+ llvm_unreachable(".indidx encoding not yet implemented");
+}
+
+void WebAssemblyTargetWasmStreamer::emitIndirectFunctionType(
+ MCSymbol *Symbol, SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) {
+ MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Symbol);
+ if (WasmSym->isFunction()) {
+ // Symbol already has its arguments and result set.
+ return;
+ }
+
+ SmallVector<wasm::ValType, 4> ValParams;
+ for (MVT Ty : Params)
+ ValParams.push_back(WebAssembly::toValType(Ty));
+
+ SmallVector<wasm::ValType, 1> ValResults;
+ for (MVT Ty : Results)
+ ValResults.push_back(WebAssembly::toValType(Ty));
+
+ WasmSym->setParams(std::move(ValParams));
+ WasmSym->setReturns(std::move(ValResults));
+ WasmSym->setIsFunction(true);
+}
+
+void WebAssemblyTargetWasmStreamer::emitGlobalImport(StringRef name) {
+ llvm_unreachable(".global_import is not needed for direct wasm output");
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
new file mode 100644
index 000000000000..2cb21a20580b
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h
@@ -0,0 +1,112 @@
+//==-- WebAssemblyTargetStreamer.h - WebAssembly Target Streamer -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares WebAssembly-specific target streamer classes.
+/// These are for implementing support for target-specific assembly directives.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_MCTARGETDESC_WEBASSEMBLYTARGETSTREAMER_H
+
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+
+class MCELFStreamer;
+class MCWasmStreamer;
+
+/// WebAssembly-specific streamer interface, to implement support for
+/// WebAssembly-specific assembly directives.
+class WebAssemblyTargetStreamer : public MCTargetStreamer {
+public:
+ explicit WebAssemblyTargetStreamer(MCStreamer &S);
+
+ /// .param
+ virtual void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) = 0;
+ /// .result
+ virtual void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) = 0;
+ /// .local
+ virtual void emitLocal(ArrayRef<MVT> Types) = 0;
+ /// .globalvar
+ virtual void emitGlobal(ArrayRef<wasm::Global> Globals) = 0;
+ /// .endfunc
+ virtual void emitEndFunc() = 0;
+ /// .functype
+ virtual void emitIndirectFunctionType(MCSymbol *Symbol,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) = 0;
+ /// .indidx
+ virtual void emitIndIdx(const MCExpr *Value) = 0;
+ /// .import_global
+ virtual void emitGlobalImport(StringRef name) = 0;
+
+protected:
+ void emitValueType(wasm::ValType Type);
+};
+
+/// This part is for ASCII assembly output.
+class WebAssemblyTargetAsmStreamer final : public WebAssemblyTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ WebAssemblyTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+
+ void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) override;
+ void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) override;
+ void emitLocal(ArrayRef<MVT> Types) override;
+ void emitGlobal(ArrayRef<wasm::Global> Globals) override;
+ void emitEndFunc() override;
+ void emitIndirectFunctionType(MCSymbol *Symbol,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) override;
+ void emitIndIdx(const MCExpr *Value) override;
+ void emitGlobalImport(StringRef name) override;
+};
+
+/// This part is for ELF object output
+class WebAssemblyTargetELFStreamer final : public WebAssemblyTargetStreamer {
+public:
+ explicit WebAssemblyTargetELFStreamer(MCStreamer &S);
+
+ void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) override;
+ void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) override;
+ void emitLocal(ArrayRef<MVT> Types) override;
+ void emitGlobal(ArrayRef<wasm::Global> Globals) override;
+ void emitEndFunc() override;
+ void emitIndirectFunctionType(MCSymbol *Symbol,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) override;
+ void emitIndIdx(const MCExpr *Value) override;
+ void emitGlobalImport(StringRef name) override;
+};
+
+/// This part is for Wasm object output
+class WebAssemblyTargetWasmStreamer final : public WebAssemblyTargetStreamer {
+public:
+ explicit WebAssemblyTargetWasmStreamer(MCStreamer &S);
+
+ void emitParam(MCSymbol *Symbol, ArrayRef<MVT> Types) override;
+ void emitResult(MCSymbol *Symbol, ArrayRef<MVT> Types) override;
+ void emitLocal(ArrayRef<MVT> Types) override;
+ void emitGlobal(ArrayRef<wasm::Global> Globals) override;
+ void emitEndFunc() override;
+ void emitIndirectFunctionType(MCSymbol *Symbol,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) override;
+ void emitIndIdx(const MCExpr *Value) override;
+ void emitGlobalImport(StringRef name) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp
new file mode 100644
index 000000000000..39abde26df7f
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp
@@ -0,0 +1,102 @@
+//===-- WebAssemblyWasmObjectWriter.cpp - WebAssembly Wasm Writer ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file handles Wasm-specific object emission, converting LLVM's
+/// internal fixups into the appropriate relocations.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyFixupKinds.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/BinaryFormat/Wasm.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/MCWasmObjectWriter.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+class WebAssemblyWasmObjectWriter final : public MCWasmObjectTargetWriter {
+public:
+ explicit WebAssemblyWasmObjectWriter(bool Is64Bit);
+
+private:
+ unsigned getRelocType(const MCValue &Target,
+ const MCFixup &Fixup) const override;
+};
+} // end anonymous namespace
+
+WebAssemblyWasmObjectWriter::WebAssemblyWasmObjectWriter(bool Is64Bit)
+ : MCWasmObjectTargetWriter(Is64Bit) {}
+
+// Test whether the given expression computes a function address.
+static bool IsFunctionExpr(const MCExpr *Expr) {
+ if (auto SyExp = dyn_cast<MCSymbolRefExpr>(Expr))
+ return cast<MCSymbolWasm>(SyExp->getSymbol()).isFunction();
+
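+ // A binary expression computes a function address iff exactly one operand
+ // does; e.g. "f + 4" is still a function address, while "f - g" is a plain
+ // integer.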
+ if (auto BinOp = dyn_cast<MCBinaryExpr>(Expr))
+ return IsFunctionExpr(BinOp->getLHS()) != IsFunctionExpr(BinOp->getRHS());
+
+ if (auto UnOp = dyn_cast<MCUnaryExpr>(Expr))
+ return IsFunctionExpr(UnOp->getSubExpr());
+
+ return false;
+}
+
+static bool IsFunctionType(const MCValue &Target) {
+ const MCSymbolRefExpr *RefA = Target.getSymA();
+ return RefA && RefA->getKind() == MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX;
+}
+
+unsigned
+WebAssemblyWasmObjectWriter::getRelocType(const MCValue &Target,
+ const MCFixup &Fixup) const {
+ // WebAssembly functions are not allocated in the data address space. To
+ // resolve a pointer to a function, we must use a special relocation type.
+ bool IsFunction = IsFunctionExpr(Fixup.getValue());
+
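+ // Function addresses resolve to indices into the indirect function table
+ // rather than memory addresses, so fixups against function symbols use
+ // TABLE_INDEX relocations instead of MEMORY_ADDR relocations.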
+ switch (unsigned(Fixup.getKind())) {
+ case WebAssembly::fixup_code_global_index:
+ return wasm::R_WEBASSEMBLY_GLOBAL_INDEX_LEB;
+ case WebAssembly::fixup_code_sleb128_i32:
+ if (IsFunction)
+ return wasm::R_WEBASSEMBLY_TABLE_INDEX_SLEB;
+ return wasm::R_WEBASSEMBLY_MEMORY_ADDR_SLEB;
+ case WebAssembly::fixup_code_sleb128_i64:
+ llvm_unreachable("fixup_sleb128_i64 not implemented yet");
+ case WebAssembly::fixup_code_uleb128_i32:
+ if (IsFunctionType(Target))
+ return wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB;
+ if (IsFunction)
+ return wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB;
+ return wasm::R_WEBASSEMBLY_MEMORY_ADDR_LEB;
+ case FK_Data_4:
+ if (IsFunction)
+ return wasm::R_WEBASSEMBLY_TABLE_INDEX_I32;
+ return wasm::R_WEBASSEMBLY_MEMORY_ADDR_I32;
+ case FK_Data_8:
+ llvm_unreachable("FK_Data_8 not implemented yet");
+ default:
+ llvm_unreachable("unimplemented fixup kind");
+ }
+}
+
+std::unique_ptr<MCObjectWriter>
+llvm::createWebAssemblyWasmObjectWriter(raw_pwrite_stream &OS,
+ bool Is64Bit) {
+ auto MOTW = llvm::make_unique<WebAssemblyWasmObjectWriter>(Is64Bit);
+ return createWasmObjectWriter(std::move(MOTW), OS);
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/README.txt b/contrib/llvm/lib/Target/WebAssembly/README.txt
new file mode 100644
index 000000000000..3433b1553e8c
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/README.txt
@@ -0,0 +1,168 @@
+//===-- README.txt - Notes for WebAssembly code gen -----------------------===//
+
+This WebAssembly backend is presently under development.
+
+Currently the easiest way to use it is through Emscripten, which provides a
+compilation environment that includes standard libraries, tools, and packaging
+for producing WebAssembly applications that can run in browsers and other
+environments. For more information, see the Emscripten documentation in
+general, and this page in particular:
+ * https://github.com/kripken/emscripten/wiki/New-WebAssembly-Backend
+
+Other ways of using this backend, such as via a standalone "clang", are also
+under development, though they are not generally usable yet.
+
+For more information on WebAssembly itself, see the home page:
+ * https://webassembly.github.io/
+
+The following documents contain some information on the semantics and binary
+encoding of WebAssembly itself:
+ * https://github.com/WebAssembly/design/blob/master/Semantics.md
+ * https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
+
+The backend is built, tested and archived on the following waterfall:
+ https://wasm-stat.us
+
+The backend's bringup is done in part by using the GCC torture test suite, since
+it doesn't require C library support. Current known failures are listed in
+known_gcc_test_failures.txt; all other tests should pass, and the waterfall
+turns red if they don't. Once most of these pass, further testing will use
+LLVM's own test suite. The tests can be run locally using:
+ https://github.com/WebAssembly/waterfall/blob/master/src/compile_torture_tests.py
+
+//===---------------------------------------------------------------------===//
+
+Br, br_if, and br_table instructions can sometimes carry a value on the value
+stack across the jump. We should (a) model this, and (b) extend the
+stackifier to utilize it.
+
+//===---------------------------------------------------------------------===//
+
+The min/max instructions aren't exactly a<b?a:b because of NaN and negative zero
+behavior. The ARM target has the same kind of min/max instructions and has
+implemented optimizations for them; we should do similar optimizations for
+WebAssembly.
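+
+For example (an illustrative sketch, not from the spec or the backend):
+
+ float select_min(float a, float b) { return a < b ? a : b; }
+
+select_min(NaN, 1.0f) returns 1.0f, whereas f32.min(NaN, 1.0) is NaN; and
+select_min(-0.0f, +0.0f) returns +0.0f, whereas f32.min(-0.0, +0.0) is -0.0.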
+
+//===---------------------------------------------------------------------===//
+
+AArch64 runs SeparateConstOffsetFromGEPPass, followed by EarlyCSE and LICM.
+Would these be useful to run for WebAssembly too? Also, it has an option to
+run SimplifyCFG after running the AtomicExpand pass. Would this be useful for
+us too?
+
+//===---------------------------------------------------------------------===//
+
+Register stackification uses the VALUE_STACK physical register to impose
+ordering dependencies on instructions with stack operands. This is pessimistic;
+we should consider alternate ways to model stack dependencies.
+
+//===---------------------------------------------------------------------===//
+
+Lots of things could be done in WebAssemblyTargetTransformInfo.cpp. Similarly,
+there are numerous optimization-related hooks that can be overridden in
+WebAssemblyTargetLowering.
+
+//===---------------------------------------------------------------------===//
+
+Instead of the OptimizeReturned pass, we should consider preserving the
+"returned" attribute through to MachineInstrs and extending the StoreResults
+pass to do this optimization on calls too. That would also let the
+WebAssemblyPeephole pass clean up dead defs for such calls, as it does for
+stores.
+
+//===---------------------------------------------------------------------===//
+
+Consider implementing optimizeSelect, optimizeCompareInstr, optimizeCondBranch,
+optimizeLoadInstr, and/or getMachineCombinerPatterns.
+
+//===---------------------------------------------------------------------===//
+
+Find a clean way to fix the problem which leads to the Shrink Wrapping pass
+being run after the WebAssembly PEI pass.
+
+//===---------------------------------------------------------------------===//
+
+When setting multiple local variables to the same constant, we currently get
+code like this:
+
+ i32.const $4=, 0
+ i32.const $3=, 0
+
+It could be done with a smaller encoding like this:
+
+ i32.const $push5=, 0
+ tee_local $push6=, $4=, $pop5
+ copy_local $3=, $pop6
+
+//===---------------------------------------------------------------------===//
+
+WebAssembly registers are implicitly initialized to zero. Explicit zeroing is
+therefore often redundant and could be optimized away.
+
+//===---------------------------------------------------------------------===//
+
+Small indices may use smaller encodings than large indices.
+WebAssemblyRegColoring and/or WebAssemblyRegRenumbering should sort registers
+according to their usage frequency to maximize the usage of smaller encodings.
+
+//===---------------------------------------------------------------------===//
+
+Many cases of irreducible control flow could be transformed more optimally
+than via the transform in WebAssemblyFixIrreducibleControlFlow.cpp.
+
+It may also be worthwhile to do transforms before register coloring,
+particularly when duplicating code, to allow register coloring to be aware of
+the duplication.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify could use AliasAnalysis to reorder loads and stores more
+aggressively.
+
+//===---------------------------------------------------------------------===//
+
+WebAssemblyRegStackify is currently a greedy algorithm. This means that, for
+example, a binary operator will stackify with its user before its operands.
+However, if moving the binary operator to its user moves it to a place where
+its operands can't be moved to, it would be better to leave it in place, or
+perhaps move it up, so that it can stackify its operands. A binary operator
+has two operands and one result, so in such cases there could be a net win by
+preferring the operands.
+
+//===---------------------------------------------------------------------===//
+
+Instruction ordering has a significant influence on register stackification and
+coloring. Consider experimenting with the MachineScheduler (enable via
+enableMachineScheduler) and determine if it can be configured to schedule
+instructions advantageously for this purpose.
+
+//===---------------------------------------------------------------------===//
+
+WebAssembly is now officially a stack machine, rather than an AST, and this
+comes with additional opportunities for WebAssemblyRegStackify. Specifically,
+the stack doesn't need to be empty after an instruction with no return values.
+WebAssemblyRegStackify could be extended, or possibly rewritten, to take
+advantage of the new opportunities.
+
+//===---------------------------------------------------------------------===//
+
+Add support for mergeable sections in the Wasm writer, such as for strings and
+floating-point constants.
+
+//===---------------------------------------------------------------------===//
+
+The function @dynamic_alloca_redzone in test/CodeGen/WebAssembly/userstack.ll
+ends up with a tee_local in its prolog which has an unused result, requiring
+an extra drop:
+
+ get_global $push8=, 0
+ tee_local $push9=, 1, $pop8
+ drop $pop9
+ [...]
+
+The prologue code initially thinks it needs an FP register, but later it
+turns out to be unneeded. One could either be more clever about not
+inserting the FP code in the first place, or optimize the copy away later.
+
+//===---------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp
new file mode 100644
index 000000000000..a2c03b1a0400
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/TargetInfo/WebAssemblyTargetInfo.cpp
@@ -0,0 +1,36 @@
+//===-- WebAssemblyTargetInfo.cpp - WebAssembly Target Implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file registers the WebAssembly target.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-target-info"
+
+Target &llvm::getTheWebAssemblyTarget32() {
+ static Target TheWebAssemblyTarget32;
+ return TheWebAssemblyTarget32;
+}
+Target &llvm::getTheWebAssemblyTarget64() {
+ static Target TheWebAssemblyTarget64;
+ return TheWebAssemblyTarget64;
+}
+
+extern "C" void LLVMInitializeWebAssemblyTargetInfo() {
+ RegisterTarget<Triple::wasm32> X(getTheWebAssemblyTarget32(), "wasm32",
+ "WebAssembly 32-bit", "WebAssembly");
+ RegisterTarget<Triple::wasm64> Y(getTheWebAssemblyTarget64(), "wasm64",
+ "WebAssembly 64-bit", "WebAssembly");
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h
new file mode 100644
index 000000000000..7ac6c3991531
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.h
@@ -0,0 +1,59 @@
+//===-- WebAssembly.h - Top-level interface for WebAssembly ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the entry points for global functions defined in
+/// the LLVM WebAssembly back-end.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLY_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLY_H
+
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+
+class WebAssemblyTargetMachine;
+class ModulePass;
+class FunctionPass;
+
+// LLVM IR passes.
+ModulePass *createWebAssemblyLowerEmscriptenEHSjLj(bool DoEH, bool DoSjLj);
+void initializeWebAssemblyLowerEmscriptenEHSjLjPass(PassRegistry &);
+ModulePass *createWebAssemblyLowerGlobalDtors();
+ModulePass *createWebAssemblyFixFunctionBitcasts();
+FunctionPass *createWebAssemblyOptimizeReturned();
+
+// ISel and immediate followup passes.
+FunctionPass *createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
+ CodeGenOpt::Level OptLevel);
+FunctionPass *createWebAssemblyArgumentMove();
+FunctionPass *createWebAssemblySetP2AlignOperands();
+
+// Late passes.
+FunctionPass *createWebAssemblyReplacePhysRegs();
+FunctionPass *createWebAssemblyPrepareForLiveIntervals();
+FunctionPass *createWebAssemblyOptimizeLiveIntervals();
+FunctionPass *createWebAssemblyStoreResults();
+FunctionPass *createWebAssemblyRegStackify();
+FunctionPass *createWebAssemblyRegColoring();
+FunctionPass *createWebAssemblyExplicitLocals();
+FunctionPass *createWebAssemblyFixIrreducibleControlFlow();
+FunctionPass *createWebAssemblyCFGSort();
+FunctionPass *createWebAssemblyCFGStackify();
+FunctionPass *createWebAssemblyLowerBrUnless();
+FunctionPass *createWebAssemblyRegNumbering();
+FunctionPass *createWebAssemblyPeephole();
+FunctionPass *createWebAssemblyCallIndirectFixup();
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td
new file mode 100644
index 000000000000..99cf1f119a20
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssembly.td
@@ -0,0 +1,73 @@
+//- WebAssembly.td - Describe the WebAssembly Target Machine --*- tablegen -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This is a target description file for the WebAssembly architecture,
+/// which is also known as "wasm".
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Subtarget features.
+//===----------------------------------------------------------------------===//
+
+def FeatureSIMD128 : SubtargetFeature<"simd128", "HasSIMD128", "true",
+ "Enable 128-bit SIMD">;
+def FeatureAtomics : SubtargetFeature<"atomics", "HasAtomics", "true",
+ "Enable Atomics">;
+def FeatureNontrappingFPToInt :
+ SubtargetFeature<"nontrapping-fptoint",
+ "HasNontrappingFPToInt", "true",
+ "Enable non-trapping float-to-int conversion operators">;
+
+//===----------------------------------------------------------------------===//
+// Architectures.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Register File Description
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyRegisterInfo.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyInstrInfo.td"
+
+def WebAssemblyInstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Processors supported.
+//===----------------------------------------------------------------------===//
+
+// Minimal Viable Product.
+def : ProcessorModel<"mvp", NoSchedModel, []>;
+
+// Generic processor: latest stable version.
+def : ProcessorModel<"generic", NoSchedModel, []>;
+
+// Latest and greatest experimental version of WebAssembly. Bugs included!
+def : ProcessorModel<"bleeding-edge", NoSchedModel,
+ [FeatureSIMD128, FeatureAtomics]>;
+
+//===----------------------------------------------------------------------===//
+// Target Declaration
+//===----------------------------------------------------------------------===//
+
+def WebAssembly : Target {
+ let InstructionSet = WebAssemblyInstrInfo;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
new file mode 100644
index 000000000000..5fadca38b820
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyArgumentMove.cpp
@@ -0,0 +1,95 @@
+//===-- WebAssemblyArgumentMove.cpp - Argument instruction moving ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file moves ARGUMENT instructions after ScheduleDAG scheduling.
+///
+/// Arguments are really live-in registers; however, since we use virtual
+/// registers and LLVM doesn't support live-in virtual registers, we're
+/// currently making do with ARGUMENT instructions, which are placed at the
+/// top of the entry block. The trick is to get them to *stay* at the top of
+/// the entry block.
+///
+/// The ARGUMENTS physical register keeps these instructions pinned in place
+/// during liveness-aware CodeGen passes; however, one thing that does not
+/// respect this is the ScheduleDAG scheduler. This pass is therefore run
+/// immediately after it.
+///
+/// This is all hopefully a temporary measure until we find a better way to
+/// describe the live-in nature of arguments.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-argument-move"
+
+namespace {
+class WebAssemblyArgumentMove final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyArgumentMove() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override { return "WebAssembly Argument Move"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyArgumentMove::ID = 0;
+FunctionPass *llvm::createWebAssemblyArgumentMove() {
+ return new WebAssemblyArgumentMove();
+}
+
+bool WebAssemblyArgumentMove::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Argument Move **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+ MachineBasicBlock &EntryMBB = MF.front();
+ MachineBasicBlock::iterator InsertPt = EntryMBB.end();
+
+ // Look for the first non-ARGUMENT instruction.
+ for (MachineInstr &MI : EntryMBB) {
+ if (!WebAssembly::isArgument(MI)) {
+ InsertPt = MI;
+ break;
+ }
+ }
+
+ // Now move any ARGUMENT instructions appearing later in the block to
+ // before our first non-ARGUMENT instruction.
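+ // E.g. an entry block of [ARG0, add, ARG1] becomes [ARG0, ARG1, add]
+ // (illustrative).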
+ for (MachineInstr &MI : llvm::make_range(InsertPt, EntryMBB.end())) {
+ if (WebAssembly::isArgument(MI)) {
+ EntryMBB.insert(InsertPt, MI.removeFromParent());
+ Changed = true;
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
new file mode 100644
index 000000000000..204d97cbdd44
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp
@@ -0,0 +1,286 @@
+//===-- WebAssemblyAsmPrinter.cpp - WebAssembly LLVM assembly writer ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains a printer that converts from our internal
+/// representation of machine-dependent LLVM code to the WebAssembly assembly
+/// language.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyAsmPrinter.h"
+#include "InstPrinter/WebAssemblyInstPrinter.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "MCTargetDesc/WebAssemblyTargetStreamer.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMCInstLower.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblyRegisterInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+MVT WebAssemblyAsmPrinter::getRegType(unsigned RegNo) const {
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const TargetRegisterClass *TRC = MRI->getRegClass(RegNo);
+ for (MVT T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64, MVT::v16i8, MVT::v8i16,
+ MVT::v4i32, MVT::v4f32})
+ if (TRI->isTypeLegalForClass(*TRC, T))
+ return T;
+ DEBUG(errs() << "Unknown type for register number: " << RegNo);
+ llvm_unreachable("Unknown register type");
+ return MVT::Other;
+}
+
+std::string WebAssemblyAsmPrinter::regToString(const MachineOperand &MO) {
+ unsigned RegNo = MO.getReg();
+ assert(TargetRegisterInfo::isVirtualRegister(RegNo) &&
+ "Unlowered physical register encountered during assembly printing");
+ assert(!MFI->isVRegStackified(RegNo));
+ unsigned WAReg = MFI->getWAReg(RegNo);
+ assert(WAReg != WebAssemblyFunctionInfo::UnusedReg);
+ return '$' + utostr(WAReg);
+}
+
+WebAssemblyTargetStreamer *WebAssemblyAsmPrinter::getTargetStreamer() {
+ MCTargetStreamer *TS = OutStreamer->getTargetStreamer();
+ return static_cast<WebAssemblyTargetStreamer *>(TS);
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssemblyAsmPrinter Implementation.
+//===----------------------------------------------------------------------===//
+
+void WebAssemblyAsmPrinter::EmitEndOfAsmFile(Module &M) {
+ for (const auto &F : M) {
+ // Emit function type info for all undefined functions, so that their
+ // signatures are available even though no body is emitted.
+ if (F.isDeclarationForLinker() && !F.isIntrinsic()) {
+ SmallVector<MVT, 4> Results;
+ SmallVector<MVT, 4> Params;
+ ComputeSignatureVTs(F, TM, Params, Results);
+ getTargetStreamer()->emitIndirectFunctionType(getSymbol(&F), Params,
+ Results);
+ }
+ }
+ for (const auto &G : M.globals()) {
+ if (!G.hasInitializer() && G.hasExternalLinkage()) {
+ if (G.getValueType()->isSized()) {
+ uint16_t Size = M.getDataLayout().getTypeAllocSize(G.getValueType());
+ if (TM.getTargetTriple().isOSBinFormatELF())
+ getTargetStreamer()->emitGlobalImport(G.getGlobalIdentifier());
+ OutStreamer->emitELFSize(getSymbol(&G),
+ MCConstantExpr::create(Size, OutContext));
+ }
+ }
+ }
+}
+
+void WebAssemblyAsmPrinter::EmitConstantPool() {
+ assert(MF->getConstantPool()->getConstants().empty() &&
+ "WebAssembly disables constant pools");
+}
+
+void WebAssemblyAsmPrinter::EmitJumpTableInfo() {
+ // Nothing to do; jump tables are incorporated into the instruction stream.
+}
+
+void WebAssemblyAsmPrinter::EmitFunctionBodyStart() {
+ getTargetStreamer()->emitParam(CurrentFnSym, MFI->getParams());
+
+ SmallVector<MVT, 4> ResultVTs;
+ const Function &F = MF->getFunction();
+
+ // Emit the function index.
+ if (MDNode *Idx = F.getMetadata("wasm.index")) {
+ assert(Idx->getNumOperands() == 1);
+
+ getTargetStreamer()->emitIndIdx(AsmPrinter::lowerConstant(
+ cast<ConstantAsMetadata>(Idx->getOperand(0))->getValue()));
+ }
+
+ ComputeLegalValueVTs(F, TM, F.getReturnType(), ResultVTs);
+
+ // If the return type needs to be legalized, it will get converted into
+ // passing a pointer.
+ if (ResultVTs.size() == 1)
+ getTargetStreamer()->emitResult(CurrentFnSym, ResultVTs);
+ else
+ getTargetStreamer()->emitResult(CurrentFnSym, ArrayRef<MVT>());
+
+ if (TM.getTargetTriple().isOSBinFormatELF()) {
+ assert(MFI->getLocals().empty());
+ for (unsigned Idx = 0, IdxE = MRI->getNumVirtRegs(); Idx != IdxE; ++Idx) {
+ unsigned VReg = TargetRegisterInfo::index2VirtReg(Idx);
+ unsigned WAReg = MFI->getWAReg(VReg);
+ // Don't declare unused registers.
+ if (WAReg == WebAssemblyFunctionInfo::UnusedReg)
+ continue;
+ // Don't redeclare parameters.
+ if (WAReg < MFI->getParams().size())
+ continue;
+ // Don't declare stackified registers.
+ if (int(WAReg) < 0)
+ continue;
+ MFI->addLocal(getRegType(VReg));
+ }
+ }
+
+ getTargetStreamer()->emitLocal(MFI->getLocals());
+
+ AsmPrinter::EmitFunctionBodyStart();
+}
+
+void WebAssemblyAsmPrinter::EmitFunctionBodyEnd() {
+ if (TM.getTargetTriple().isOSBinFormatELF())
+ getTargetStreamer()->emitEndFunc();
+}
+
+void WebAssemblyAsmPrinter::EmitInstruction(const MachineInstr *MI) {
+ DEBUG(dbgs() << "EmitInstruction: " << *MI << '\n');
+
+ switch (MI->getOpcode()) {
+ case WebAssembly::ARGUMENT_I32:
+ case WebAssembly::ARGUMENT_I64:
+ case WebAssembly::ARGUMENT_F32:
+ case WebAssembly::ARGUMENT_F64:
+ case WebAssembly::ARGUMENT_v16i8:
+ case WebAssembly::ARGUMENT_v8i16:
+ case WebAssembly::ARGUMENT_v4i32:
+ case WebAssembly::ARGUMENT_v4f32:
+ // These represent values which are live into the function entry, so there's
+ // no instruction to emit.
+ break;
+ case WebAssembly::FALLTHROUGH_RETURN_I32:
+ case WebAssembly::FALLTHROUGH_RETURN_I64:
+ case WebAssembly::FALLTHROUGH_RETURN_F32:
+ case WebAssembly::FALLTHROUGH_RETURN_F64:
+ case WebAssembly::FALLTHROUGH_RETURN_v16i8:
+ case WebAssembly::FALLTHROUGH_RETURN_v8i16:
+ case WebAssembly::FALLTHROUGH_RETURN_v4i32:
+ case WebAssembly::FALLTHROUGH_RETURN_v4f32: {
+ // These instructions represent the implicit return at the end of a
+ // function body. The operand is always a pop.
+ assert(MFI->isVRegStackified(MI->getOperand(0).getReg()));
+
+ if (isVerbose()) {
+ OutStreamer->AddComment("fallthrough-return: $pop" +
+ Twine(MFI->getWARegStackId(
+ MFI->getWAReg(MI->getOperand(0).getReg()))));
+ OutStreamer->AddBlankLine();
+ }
+ break;
+ }
+ case WebAssembly::FALLTHROUGH_RETURN_VOID:
+ // This instruction represents the implicit return at the end of a
+ // function body with no return value.
+ if (isVerbose()) {
+ OutStreamer->AddComment("fallthrough-return");
+ OutStreamer->AddBlankLine();
+ }
+ break;
+ default: {
+ WebAssemblyMCInstLower MCInstLowering(OutContext, *this);
+ MCInst TmpInst;
+ MCInstLowering.Lower(MI, TmpInst);
+ EmitToStreamer(*OutStreamer, TmpInst);
+ break;
+ }
+ }
+}
+
+const MCExpr *WebAssemblyAsmPrinter::lowerConstant(const Constant *CV) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
+ if (GV->getValueType()->isFunctionTy()) {
+ return MCSymbolRefExpr::create(
+ getSymbol(GV), MCSymbolRefExpr::VK_WebAssembly_FUNCTION, OutContext);
+ }
+ return AsmPrinter::lowerConstant(CV);
+}
+
+bool WebAssemblyAsmPrinter::PrintAsmOperand(const MachineInstr *MI,
+ unsigned OpNo, unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ if (AsmVariant != 0)
+ report_fatal_error("There are no defined alternate asm variants");
+
+ // First try the generic code, which knows about modifiers like 'c' and 'n'.
+ if (!AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, OS))
+ return false;
+
+ if (!ExtraCode) {
+ const MachineOperand &MO = MI->getOperand(OpNo);
+ switch (MO.getType()) {
+ case MachineOperand::MO_Immediate:
+ OS << MO.getImm();
+ return false;
+ case MachineOperand::MO_Register:
+ OS << regToString(MO);
+ return false;
+ case MachineOperand::MO_GlobalAddress:
+ getSymbol(MO.getGlobal())->print(OS, MAI);
+ printOffset(MO.getOffset(), OS);
+ return false;
+ case MachineOperand::MO_ExternalSymbol:
+ GetExternalSymbolSymbol(MO.getSymbolName())->print(OS, MAI);
+ printOffset(MO.getOffset(), OS);
+ return false;
+ case MachineOperand::MO_MachineBasicBlock:
+ MO.getMBB()->getSymbol()->print(OS, MAI);
+ return false;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool WebAssemblyAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &OS) {
+ if (AsmVariant != 0)
+ report_fatal_error("There are no defined alternate asm variants");
+
+ // The current approach to inline asm is that "r" constraints are expressed
+ // as local indices, rather than values on the operand stack. This simplifies
+ // using "r" as it eliminates the need to push and pop the values in a
+ // particular order, however it also makes it impossible to have an "m"
+ // constraint. So we don't support it.
+
+ return AsmPrinter::PrintAsmMemoryOperand(MI, OpNo, AsmVariant, ExtraCode, OS);
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeWebAssemblyAsmPrinter() {
+ RegisterAsmPrinter<WebAssemblyAsmPrinter> X(getTheWebAssemblyTarget32());
+ RegisterAsmPrinter<WebAssemblyAsmPrinter> Y(getTheWebAssemblyTarget64());
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h
new file mode 100644
index 000000000000..a37f8bcf6ba5
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyAsmPrinter.h
@@ -0,0 +1,77 @@
+// WebAssemblyAsmPrinter.h - WebAssembly implementation of AsmPrinter-*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYASMPRINTER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYASMPRINTER_H
+
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class MCSymbol;
+class WebAssemblyTargetStreamer;
+class WebAssemblyMCInstLower;
+
+class LLVM_LIBRARY_VISIBILITY WebAssemblyAsmPrinter final : public AsmPrinter {
+ const WebAssemblySubtarget *Subtarget;
+ const MachineRegisterInfo *MRI;
+ WebAssemblyFunctionInfo *MFI;
+
+public:
+ explicit WebAssemblyAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)),
+ Subtarget(nullptr), MRI(nullptr), MFI(nullptr) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Assembly Printer";
+ }
+
+ const WebAssemblySubtarget &getSubtarget() const { return *Subtarget; }
+
+ //===------------------------------------------------------------------===//
+ // MachineFunctionPass Implementation.
+ //===------------------------------------------------------------------===//
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<WebAssemblySubtarget>();
+ MRI = &MF.getRegInfo();
+ MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+ return AsmPrinter::runOnMachineFunction(MF);
+ }
+
+ //===------------------------------------------------------------------===//
+ // AsmPrinter Implementation.
+ //===------------------------------------------------------------------===//
+
+ void EmitEndOfAsmFile(Module &M) override;
+ void EmitJumpTableInfo() override;
+ void EmitConstantPool() override;
+ void EmitFunctionBodyStart() override;
+ void EmitFunctionBodyEnd() override;
+ void EmitInstruction(const MachineInstr *MI) override;
+ const MCExpr *lowerConstant(const Constant *CV) override;
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS) override;
+
+ MVT getRegType(unsigned RegNo) const;
+ std::string regToString(const MachineOperand &MO);
+ WebAssemblyTargetStreamer *getTargetStreamer();
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
new file mode 100644
index 000000000000..700111743ee8
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGSort.cpp
@@ -0,0 +1,277 @@
+//===-- WebAssemblyCFGSort.cpp - CFG Sorting ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a CFG sorting pass.
+///
+/// This pass reorders the blocks in a function to put them into topological
+/// order, ignoring loop backedges, and without any loop being interrupted
+/// by a block not dominated by the loop header, with special care to keep the
+/// order as similar as possible to the original order.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/PriorityQueue.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-cfg-sort"
+
+namespace {
+class WebAssemblyCFGSort final : public MachineFunctionPass {
+ StringRef getPassName() const override { return "WebAssembly CFG Sort"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyCFGSort() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyCFGSort::ID = 0;
+FunctionPass *llvm::createWebAssemblyCFGSort() {
+ return new WebAssemblyCFGSort();
+}
+
+static void MaybeUpdateTerminator(MachineBasicBlock *MBB) {
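+ // After blocks have been reordered, a terminator may no longer match its
+ // layout successor, so re-run updateTerminator() when all terminators are
+ // analyzable branches.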
+#ifndef NDEBUG
+ bool AnyBarrier = false;
+#endif
+ bool AllAnalyzable = true;
+ for (const MachineInstr &Term : MBB->terminators()) {
+#ifndef NDEBUG
+ AnyBarrier |= Term.isBarrier();
+#endif
+ AllAnalyzable &= Term.isBranch() && !Term.isIndirectBranch();
+ }
+ assert((AnyBarrier || AllAnalyzable) &&
+ "AnalyzeBranch needs to analyze any block with a fallthrough");
+ if (AllAnalyzable)
+ MBB->updateTerminator();
+}
+
+namespace {
+/// Sort blocks by their number.
+struct CompareBlockNumbers {
+ bool operator()(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ return A->getNumber() > B->getNumber();
+ }
+};
+/// Sort blocks by their number in the opposite order.
+struct CompareBlockNumbersBackwards {
+ bool operator()(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ return A->getNumber() < B->getNumber();
+ }
+};
+/// Bookkeeping for a loop to help ensure that we don't mix blocks not dominated
+/// by the loop header among the loop's blocks.
+struct Entry {
+ const MachineLoop *Loop;
+ unsigned NumBlocksLeft;
+
+ /// List of blocks not dominated by Loop's header that are deferred until
+ /// after all of Loop's blocks have been seen.
+ std::vector<MachineBasicBlock *> Deferred;
+
+ explicit Entry(const MachineLoop *L)
+ : Loop(L), NumBlocksLeft(L->getNumBlocks()) {}
+};
+} // end anonymous namespace
+
+/// Sort the blocks, taking special care to make sure that loops are not
+/// interrupted by blocks not dominated by their header.
+/// TODO: There are many opportunities for improving the heuristics here.
+/// Explore them.
+static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
+ const MachineDominatorTree &MDT) {
+ // Prepare for a topological sort: Record the number of predecessors each
+ // block has, ignoring loop backedges.
+ MF.RenumberBlocks();
+ SmallVector<unsigned, 16> NumPredsLeft(MF.getNumBlockIDs(), 0);
+ for (MachineBasicBlock &MBB : MF) {
+ unsigned N = MBB.pred_size();
+ if (MachineLoop *L = MLI.getLoopFor(&MBB))
+ if (L->getHeader() == &MBB)
+ for (const MachineBasicBlock *Pred : MBB.predecessors())
+ if (L->contains(Pred))
+ --N;
+ NumPredsLeft[MBB.getNumber()] = N;
+ }
+
+ // Topological sort the CFG, with additional constraints:
+ // - Between a loop header and the last block in the loop, there can be
+ // no blocks not dominated by the loop header.
+ // - It's desirable to preserve the original block order when possible.
+ // We use two ready lists: Preferred and Ready. Preferred has recently
+ // processed successors, to help preserve block sequences from the original
+ // order. Ready has the remaining ready blocks.
+ PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
+ CompareBlockNumbers>
+ Preferred;
+ PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
+ CompareBlockNumbersBackwards>
+ Ready;
+ SmallVector<Entry, 4> Loops;
+ for (MachineBasicBlock *MBB = &MF.front();;) {
+ const MachineLoop *L = MLI.getLoopFor(MBB);
+ if (L) {
+ // If MBB is a loop header, add it to the active loop list. We can't place
+ // any blocks that it doesn't dominate until we see the end of the loop.
+ if (L->getHeader() == MBB)
+ Loops.push_back(Entry(L));
+ // For each active loop the block is in, decrement the count. If MBB is
+ // the last block in an active loop, take it off the list and pick up any
+ // blocks deferred because the header didn't dominate them.
+ for (Entry &E : Loops)
+ if (E.Loop->contains(MBB) && --E.NumBlocksLeft == 0)
+ for (auto DeferredBlock : E.Deferred)
+ Ready.push(DeferredBlock);
+ while (!Loops.empty() && Loops.back().NumBlocksLeft == 0)
+ Loops.pop_back();
+ }
+ // The main topological sort logic.
+ for (MachineBasicBlock *Succ : MBB->successors()) {
+ // Ignore backedges.
+ if (MachineLoop *SuccL = MLI.getLoopFor(Succ))
+ if (SuccL->getHeader() == Succ && SuccL->contains(MBB))
+ continue;
+ // Decrement the predecessor count. If it's now zero, it's ready.
+ if (--NumPredsLeft[Succ->getNumber()] == 0)
+ Preferred.push(Succ);
+ }
+ // Determine the block to follow MBB. First try to find a preferred block,
+ // to preserve the original block order when possible.
+ MachineBasicBlock *Next = nullptr;
+ while (!Preferred.empty()) {
+ Next = Preferred.top();
+ Preferred.pop();
+ // If Next isn't dominated by the top active loop header, defer it until
+ // that loop is done.
+ if (!Loops.empty() &&
+ !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
+ Loops.back().Deferred.push_back(Next);
+ Next = nullptr;
+ continue;
+ }
+ // If Next was originally ordered before MBB, and it isn't because it was
+ // loop-rotated above the header, it's not preferred.
+ if (Next->getNumber() < MBB->getNumber() &&
+ (!L || !L->contains(Next) ||
+ L->getHeader()->getNumber() < Next->getNumber())) {
+ Ready.push(Next);
+ Next = nullptr;
+ continue;
+ }
+ break;
+ }
+ // If we didn't find a suitable block in the Preferred list, check the
+ // general Ready list.
+ if (!Next) {
+ // If there are no more blocks to process, we're done.
+ if (Ready.empty()) {
+ MaybeUpdateTerminator(MBB);
+ break;
+ }
+ for (;;) {
+ Next = Ready.top();
+ Ready.pop();
+ // If Next isn't dominated by the top active loop header, defer it until
+ // that loop is done.
+ if (!Loops.empty() &&
+ !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
+ Loops.back().Deferred.push_back(Next);
+ continue;
+ }
+ break;
+ }
+ }
+ // Move the next block into place and iterate.
+ Next->moveAfter(MBB);
+ MaybeUpdateTerminator(MBB);
+ MBB = Next;
+ }
+ assert(Loops.empty() && "Active loop list not finished");
+ MF.RenumberBlocks();
+
+#ifndef NDEBUG
+ SmallSetVector<MachineLoop *, 8> OnStack;
+
+ // Insert a sentinel representing the degenerate loop that starts at the
+ // function entry block and includes the entire function as a "loop" that
+ // executes once.
+ OnStack.insert(nullptr);
+
+ for (auto &MBB : MF) {
+ assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
+
+ MachineLoop *Loop = MLI.getLoopFor(&MBB);
+ if (Loop && &MBB == Loop->getHeader()) {
+ // Loop header. The loop predecessor should be sorted above, and the other
+ // predecessors should be backedges below.
+ for (auto Pred : MBB.predecessors())
+ assert(
+ (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
+ "Loop header predecessors must be loop predecessors or backedges");
+ assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
+ } else {
+ // Not a loop header. All predecessors should be sorted above.
+ for (auto Pred : MBB.predecessors())
+ assert(Pred->getNumber() < MBB.getNumber() &&
+ "Non-loop-header predecessors should be topologically sorted");
+ assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
+ "Blocks must be nested in their loops");
+ }
+ while (OnStack.size() > 1 && &MBB == LoopBottom(OnStack.back()))
+ OnStack.pop_back();
+ }
+ assert(OnStack.pop_back_val() == nullptr &&
+ "The function entry block shouldn't actually be a loop header");
+ assert(OnStack.empty() &&
+ "Control flow stack pushes and pops should be balanced.");
+#endif
+}
+
+bool WebAssemblyCFGSort::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** CFG Sorting **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ const auto &MLI = getAnalysis<MachineLoopInfo>();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ // Liveness is not tracked for VALUE_STACK physreg.
+ MF.getRegInfo().invalidateLiveness();
+
+ // Sort the blocks, with contiguous loops.
+ SortBlocks(MF, MLI, MDT);
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
new file mode 100644
index 000000000000..21e0f6b23777
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp
@@ -0,0 +1,372 @@
+//===-- WebAssemblyCFGStackify.cpp - CFG Stackification -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a CFG stacking pass.
+///
+/// This pass inserts BLOCK and LOOP markers to mark the start of scopes, since
+/// scope boundaries serve as the labels for WebAssembly's control transfers.
+///
+/// This is sufficient to convert arbitrary CFGs into a form that works on
+/// WebAssembly, provided that all loops are single-entry.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-cfg-stackify"
+
+namespace {
+class WebAssemblyCFGStackify final : public MachineFunctionPass {
+ StringRef getPassName() const override { return "WebAssembly CFG Stackify"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyCFGStackify() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyCFGStackify::ID = 0;
+FunctionPass *llvm::createWebAssemblyCFGStackify() {
+ return new WebAssemblyCFGStackify();
+}
+
+/// Test whether Pred has any terminators explicitly branching to MBB, as
+/// opposed to falling through. Note that it's possible (e.g. in unoptimized
+/// code) for a branch instruction to both branch to a block and fallthrough
+/// to it, so we check the actual branch operands to see if there are any
+/// explicit mentions.
+static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred,
+ MachineBasicBlock *MBB) {
+ for (MachineInstr &MI : Pred->terminators())
+ for (MachineOperand &MO : MI.explicit_operands())
+ if (MO.isMBB() && MO.getMBB() == MBB)
+ return true;
+ return false;
+}
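+
+// For example, a terminator "BR_IF %bb.1, $cond" in a block that also falls
+// through into %bb.1 reaches %bb.1 twice; only the explicit MBB operand on
+// the BR_IF requires a label, which is why we scan the branch operands
+// rather than the successor list.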
+
+/// Insert a BLOCK marker for branches to MBB (if needed).
+static void PlaceBlockMarker(
+ MachineBasicBlock &MBB, MachineFunction &MF,
+ SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
+ DenseMap<const MachineInstr *, MachineInstr *> &BlockTops,
+ DenseMap<const MachineInstr *, MachineInstr *> &LoopTops,
+ const WebAssemblyInstrInfo &TII,
+ const MachineLoopInfo &MLI,
+ MachineDominatorTree &MDT,
+ WebAssemblyFunctionInfo &MFI) {
+ // First compute the nearest common dominator of all forward non-fallthrough
+ // predecessors so that we minimize the time that the BLOCK is on the stack,
+ // which reduces overall stack height.
+ MachineBasicBlock *Header = nullptr;
+ bool IsBranchedTo = false;
+ int MBBNumber = MBB.getNumber();
+ for (MachineBasicBlock *Pred : MBB.predecessors())
+ if (Pred->getNumber() < MBBNumber) {
+ Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
+ if (ExplicitlyBranchesTo(Pred, &MBB))
+ IsBranchedTo = true;
+ }
+ if (!Header)
+ return;
+ if (!IsBranchedTo)
+ return;
+
+ assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors");
+ MachineBasicBlock *LayoutPred = &*std::prev(MachineFunction::iterator(&MBB));
+
+ // If the nearest common dominator is inside a more deeply nested context,
+ // walk out to the nearest scope which isn't more deeply nested.
+ for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) {
+ if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) {
+ if (ScopeTop->getNumber() > Header->getNumber()) {
+ // Skip over an intervening scope.
+ I = std::next(MachineFunction::iterator(ScopeTop));
+ } else {
+ // We found a scope level at an appropriate depth.
+ Header = ScopeTop;
+ break;
+ }
+ }
+ }
+
+ // Decide where in Header to put the BLOCK.
+ MachineBasicBlock::iterator InsertPos;
+ MachineLoop *HeaderLoop = MLI.getLoopFor(Header);
+ if (HeaderLoop && MBB.getNumber() > LoopBottom(HeaderLoop)->getNumber()) {
+ // Header is the header of a loop that does not lexically contain MBB, so
+ // the BLOCK needs to be above the LOOP, after any END constructs.
+ InsertPos = Header->begin();
+ while (InsertPos->getOpcode() == WebAssembly::END_BLOCK ||
+ InsertPos->getOpcode() == WebAssembly::END_LOOP)
+ ++InsertPos;
+ } else {
+ // Otherwise, insert the BLOCK as late in Header as we can, but before the
+ // beginning of the local expression tree and any nested BLOCKs.
+ InsertPos = Header->getFirstTerminator();
+ while (InsertPos != Header->begin() &&
+ WebAssembly::isChild(*std::prev(InsertPos), MFI) &&
+ std::prev(InsertPos)->getOpcode() != WebAssembly::LOOP &&
+ std::prev(InsertPos)->getOpcode() != WebAssembly::END_BLOCK &&
+ std::prev(InsertPos)->getOpcode() != WebAssembly::END_LOOP)
+ --InsertPos;
+ }
+
+ // Add the BLOCK.
+ MachineInstr *Begin = BuildMI(*Header, InsertPos, DebugLoc(),
+ TII.get(WebAssembly::BLOCK))
+ .addImm(int64_t(WebAssembly::ExprType::Void));
+
+ // Mark the end of the block.
+ InsertPos = MBB.begin();
+ while (InsertPos != MBB.end() &&
+ InsertPos->getOpcode() == WebAssembly::END_LOOP &&
+ LoopTops[&*InsertPos]->getParent()->getNumber() >= Header->getNumber())
+ ++InsertPos;
+ MachineInstr *End = BuildMI(MBB, InsertPos, DebugLoc(),
+ TII.get(WebAssembly::END_BLOCK));
+ BlockTops[End] = Begin;
+
+ // Track the farthest-spanning scope that ends at this point.
+ int Number = MBB.getNumber();
+ if (!ScopeTops[Number] ||
+ ScopeTops[Number]->getNumber() > Header->getNumber())
+ ScopeTops[Number] = Header;
+}
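+
+// The net effect is to bracket a forward-branch target, roughly (an
+// illustrative wasm-text sketch, not the exact emitted sequence):
+//   block          ;; BLOCK placed in Header
+//     ...
+//     br_if 0 ...  ;; forward branch to MBB, later a depth immediate
+//     ...
+//   end            ;; END_BLOCK placed at the top of MBB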
+
+/// Insert a LOOP marker for a loop starting at MBB (if it's a loop header).
+static void PlaceLoopMarker(
+ MachineBasicBlock &MBB, MachineFunction &MF,
+ SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
+ DenseMap<const MachineInstr *, MachineInstr *> &LoopTops,
+ const WebAssemblyInstrInfo &TII, const MachineLoopInfo &MLI) {
+ MachineLoop *Loop = MLI.getLoopFor(&MBB);
+ if (!Loop || Loop->getHeader() != &MBB)
+ return;
+
+  // The operand of a LOOP is the first block after the loop. If the loop is
+  // at the bottom of the function, insert a dummy block at the end.
+ MachineBasicBlock *Bottom = LoopBottom(Loop);
+ auto Iter = std::next(MachineFunction::iterator(Bottom));
+ if (Iter == MF.end()) {
+ MachineBasicBlock *Label = MF.CreateMachineBasicBlock();
+ // Give it a fake predecessor so that AsmPrinter prints its label.
+ Label->addSuccessor(Label);
+ MF.push_back(Label);
+ Iter = std::next(MachineFunction::iterator(Bottom));
+ }
+ MachineBasicBlock *AfterLoop = &*Iter;
+
+ // Mark the beginning of the loop (after the end of any existing loop that
+ // ends here).
+ auto InsertPos = MBB.begin();
+ while (InsertPos != MBB.end() &&
+ InsertPos->getOpcode() == WebAssembly::END_LOOP)
+ ++InsertPos;
+ MachineInstr *Begin = BuildMI(MBB, InsertPos, DebugLoc(),
+ TII.get(WebAssembly::LOOP))
+ .addImm(int64_t(WebAssembly::ExprType::Void));
+
+ // Mark the end of the loop.
+ MachineInstr *End = BuildMI(*AfterLoop, AfterLoop->begin(), DebugLoc(),
+ TII.get(WebAssembly::END_LOOP));
+ LoopTops[End] = Begin;
+
+ assert((!ScopeTops[AfterLoop->getNumber()] ||
+ ScopeTops[AfterLoop->getNumber()]->getNumber() < MBB.getNumber()) &&
+ "With block sorting the outermost loop for a block should be first.");
+ if (!ScopeTops[AfterLoop->getNumber()])
+ ScopeTops[AfterLoop->getNumber()] = &MBB;
+}
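+
+// For a single-block self-loop this yields, roughly (illustrative):
+//   loop           ;; LOOP placed at the top of the header MBB
+//     ...
+//     br_if 0 ...  ;; backedge targets the LOOP's label
+//   end            ;; END_LOOP placed at the top of AfterLoop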
+
+static unsigned
+GetDepth(const SmallVectorImpl<const MachineBasicBlock *> &Stack,
+ const MachineBasicBlock *MBB) {
+ unsigned Depth = 0;
+ for (auto X : reverse(Stack)) {
+ if (X == MBB)
+ break;
+ ++Depth;
+ }
+ assert(Depth < Stack.size() && "Branch destination should be in scope");
+ return Depth;
+}
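+
+// For example, with Stack = [A, B, C] (C innermost), a branch targeting C
+// gets depth 0, B gets depth 1, and A gets depth 2, matching WebAssembly's
+// relative label numbering.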
+
+/// In normal assembly languages, when the end of a function is unreachable
+/// (because the function ends in an infinite loop, a noreturn call, or
+/// similar), there is no need to worry about the function's return type at
+/// the end, because it's never reached. In WebAssembly, however, blocks that
+/// end at the function end need a return type signature that matches the
+/// function signature, even though that point is unreachable. This function
+/// checks for such cases and fixes up the signatures.
+static void FixEndsAtEndOfFunction(
+ MachineFunction &MF,
+ const WebAssemblyFunctionInfo &MFI,
+ DenseMap<const MachineInstr *, MachineInstr *> &BlockTops,
+ DenseMap<const MachineInstr *, MachineInstr *> &LoopTops) {
+ assert(MFI.getResults().size() <= 1);
+
+ if (MFI.getResults().empty())
+ return;
+
+ WebAssembly::ExprType retType;
+ switch (MFI.getResults().front().SimpleTy) {
+ case MVT::i32: retType = WebAssembly::ExprType::I32; break;
+ case MVT::i64: retType = WebAssembly::ExprType::I64; break;
+ case MVT::f32: retType = WebAssembly::ExprType::F32; break;
+ case MVT::f64: retType = WebAssembly::ExprType::F64; break;
+ case MVT::v16i8: retType = WebAssembly::ExprType::I8x16; break;
+ case MVT::v8i16: retType = WebAssembly::ExprType::I16x8; break;
+ case MVT::v4i32: retType = WebAssembly::ExprType::I32x4; break;
+ case MVT::v4f32: retType = WebAssembly::ExprType::F32x4; break;
+ default: llvm_unreachable("unexpected return type");
+ }
+
+ for (MachineBasicBlock &MBB : reverse(MF)) {
+ for (MachineInstr &MI : reverse(MBB)) {
+ if (MI.isPosition() || MI.isDebugValue())
+ continue;
+ if (MI.getOpcode() == WebAssembly::END_BLOCK) {
+ BlockTops[&MI]->getOperand(0).setImm(int32_t(retType));
+ continue;
+ }
+ if (MI.getOpcode() == WebAssembly::END_LOOP) {
+ LoopTops[&MI]->getOperand(0).setImm(int32_t(retType));
+ continue;
+ }
+ // Something other than an `end`. We're done.
+ return;
+ }
+ }
+}
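+
+// For example, in an i32-returning function that ends in an infinite loop,
+// the trailing END_LOOP/END_BLOCK markers' matching LOOP/BLOCK operands are
+// rewritten from Void to I32 so the (unreachable) fallthrough type matches
+// the function signature.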
+
+// WebAssembly functions end with an end instruction, as if the function body
+// were a block.
+static void AppendEndToFunction(
+ MachineFunction &MF,
+ const WebAssemblyInstrInfo &TII) {
+ BuildMI(MF.back(), MF.back().end(), DebugLoc(),
+ TII.get(WebAssembly::END_FUNCTION));
+}
+
+/// Insert LOOP and BLOCK markers at appropriate places.
+static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
+ const WebAssemblyInstrInfo &TII,
+ MachineDominatorTree &MDT,
+ WebAssemblyFunctionInfo &MFI) {
+ // For each block whose label represents the end of a scope, record the block
+ // which holds the beginning of the scope. This will allow us to quickly skip
+ // over scoped regions when walking blocks. We allocate one more than the
+  // number of blocks in the function to accommodate the possible fake block
+ // we may insert at the end.
+ SmallVector<MachineBasicBlock *, 8> ScopeTops(MF.getNumBlockIDs() + 1);
+
+ // For each LOOP_END, the corresponding LOOP.
+ DenseMap<const MachineInstr *, MachineInstr *> LoopTops;
+
+ // For each END_BLOCK, the corresponding BLOCK.
+ DenseMap<const MachineInstr *, MachineInstr *> BlockTops;
+
+ for (auto &MBB : MF) {
+ // Place the LOOP for MBB if MBB is the header of a loop.
+ PlaceLoopMarker(MBB, MF, ScopeTops, LoopTops, TII, MLI);
+
+ // Place the BLOCK for MBB if MBB is branched to from above.
+ PlaceBlockMarker(MBB, MF, ScopeTops, BlockTops, LoopTops, TII, MLI, MDT, MFI);
+ }
+
+ // Now rewrite references to basic blocks to be depth immediates.
+ SmallVector<const MachineBasicBlock *, 8> Stack;
+ for (auto &MBB : reverse(MF)) {
+ for (auto &MI : reverse(MBB)) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::BLOCK:
+ assert(ScopeTops[Stack.back()->getNumber()]->getNumber() <= MBB.getNumber() &&
+ "Block should be balanced");
+ Stack.pop_back();
+ break;
+ case WebAssembly::LOOP:
+ assert(Stack.back() == &MBB && "Loop top should be balanced");
+ Stack.pop_back();
+ break;
+ case WebAssembly::END_BLOCK:
+ Stack.push_back(&MBB);
+ break;
+ case WebAssembly::END_LOOP:
+ Stack.push_back(LoopTops[&MI]->getParent());
+ break;
+ default:
+ if (MI.isTerminator()) {
+ // Rewrite MBB operands to be depth immediates.
+ SmallVector<MachineOperand, 4> Ops(MI.operands());
+ while (MI.getNumOperands() > 0)
+ MI.RemoveOperand(MI.getNumOperands() - 1);
+ for (auto MO : Ops) {
+ if (MO.isMBB())
+ MO = MachineOperand::CreateImm(GetDepth(Stack, MO.getMBB()));
+ MI.addOperand(MF, MO);
+ }
+ }
+ break;
+ }
+ }
+ }
+ assert(Stack.empty() && "Control flow should be balanced");
+
+ // Fix up block/loop signatures at the end of the function to conform to
+ // WebAssembly's rules.
+ FixEndsAtEndOfFunction(MF, MFI, BlockTops, LoopTops);
+
+ // Add an end instruction at the end of the function body.
+ if (!MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple().isOSBinFormatELF())
+ AppendEndToFunction(MF, TII);
+}
+
+bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** CFG Stackifying **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ const auto &MLI = getAnalysis<MachineLoopInfo>();
+ auto &MDT = getAnalysis<MachineDominatorTree>();
+ // Liveness is not tracked for VALUE_STACK physreg.
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ MF.getRegInfo().invalidateLiveness();
+
+ // Place the BLOCK and LOOP markers to indicate the beginnings of scopes.
+ PlaceMarkers(MF, MLI, TII, MDT, MFI);
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
new file mode 100644
index 000000000000..1af92f02d8e0
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyCallIndirectFixup.cpp
@@ -0,0 +1,133 @@
+//===-- WebAssemblyCallIndirectFixup.cpp - Fix call_indirects -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file converts pseudo call_indirect instructions into real
+/// call_indirects.
+///
+/// The order of arguments for a call_indirect is the arguments to the function
+/// call, followed by the function pointer. There's no natural way to express
+/// a MachineInstr with varargs followed by one more arg, so we express it as
+/// the function pointer followed by varargs, then rewrite it here.
+///
+/// We need to rewrite the order of the arguments on the machineinstrs
+/// themselves so that register stackification knows the order they'll be
+/// executed in.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" // for WebAssembly::ARGUMENT_*
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-call-indirect-fixup"
+
+namespace {
+class WebAssemblyCallIndirectFixup final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly CallIndirect Fixup";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyCallIndirectFixup() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyCallIndirectFixup::ID = 0;
+FunctionPass *llvm::createWebAssemblyCallIndirectFixup() {
+ return new WebAssemblyCallIndirectFixup();
+}
+
+static unsigned GetNonPseudoCallIndirectOpcode(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ using namespace WebAssembly;
+ case PCALL_INDIRECT_VOID: return CALL_INDIRECT_VOID;
+ case PCALL_INDIRECT_I32: return CALL_INDIRECT_I32;
+ case PCALL_INDIRECT_I64: return CALL_INDIRECT_I64;
+ case PCALL_INDIRECT_F32: return CALL_INDIRECT_F32;
+ case PCALL_INDIRECT_F64: return CALL_INDIRECT_F64;
+ case PCALL_INDIRECT_v16i8: return CALL_INDIRECT_v16i8;
+ case PCALL_INDIRECT_v8i16: return CALL_INDIRECT_v8i16;
+ case PCALL_INDIRECT_v4i32: return CALL_INDIRECT_v4i32;
+ case PCALL_INDIRECT_v4f32: return CALL_INDIRECT_v4f32;
+ default: return INSTRUCTION_LIST_END;
+ }
+}
+
+static bool IsPseudoCallIndirect(const MachineInstr &MI) {
+ return GetNonPseudoCallIndirectOpcode(MI) !=
+ WebAssembly::INSTRUCTION_LIST_END;
+}
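+
+// For example, a pseudo "%d = PCALL_INDIRECT_I32 %callee, %a, %b" becomes
+// "%d = CALL_INDIRECT_I32 0, 0, %a, %b, %callee": a placeholder type-index
+// immediate, a zero flags immediate, the arguments, and the callee last.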
+
+bool WebAssemblyCallIndirectFixup::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Fixing up CALL_INDIRECTs **********\n"
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ const WebAssemblyInstrInfo *TII =
+ MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ if (IsPseudoCallIndirect(MI)) {
+ DEBUG(dbgs() << "Found call_indirect: " << MI << '\n');
+
+ // Rewrite pseudo to non-pseudo
+ const MCInstrDesc &Desc = TII->get(GetNonPseudoCallIndirectOpcode(MI));
+ MI.setDesc(Desc);
+
+ // Rewrite argument order
+ SmallVector<MachineOperand, 8> Ops;
+
+ // Set up a placeholder for the type signature immediate.
+ Ops.push_back(MachineOperand::CreateImm(0));
+
+ // Set up the flags immediate, which currently has no defined flags
+ // so it's always zero.
+ Ops.push_back(MachineOperand::CreateImm(0));
+
+ for (const MachineOperand &MO :
+ make_range(MI.operands_begin() +
+ MI.getDesc().getNumDefs() + 1,
+ MI.operands_begin() +
+ MI.getNumExplicitOperands()))
+ Ops.push_back(MO);
+ Ops.push_back(MI.getOperand(MI.getDesc().getNumDefs()));
+
+        // Replace the instruction's operands.
+ while (MI.getNumOperands() > MI.getDesc().getNumDefs())
+ MI.RemoveOperand(MI.getNumOperands() - 1);
+ for (const MachineOperand &MO : Ops)
+ MI.addOperand(MO);
+
+ DEBUG(dbgs() << " After transform: " << MI);
+ Changed = true;
+ }
+ }
+ }
+
+ DEBUG(dbgs() << "\nDone fixing up CALL_INDIRECTs\n\n");
+
+ return Changed;
+}
+
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
new file mode 100644
index 000000000000..e2edb924d4d2
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -0,0 +1,378 @@
+//===-- WebAssemblyExplicitLocals.cpp - Make Locals Explicit --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file converts any remaining registers into WebAssembly locals.
+///
+/// After register stackification and register coloring, convert non-stackified
+/// registers into locals, inserting explicit get_local and set_local
+/// instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-explicit-locals"
+
+// A command-line option to disable this pass. Note that this produces output
+// which is not valid WebAssembly, but which may be more convenient for
+// writing LLVM unit tests.
+static cl::opt<bool> DisableWebAssemblyExplicitLocals(
+ "disable-wasm-explicit-locals", cl::ReallyHidden,
+ cl::desc("WebAssembly: Disable emission of get_local/set_local."),
+ cl::init(false));
+
+namespace {
+class WebAssemblyExplicitLocals final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Explicit Locals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyExplicitLocals() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyExplicitLocals::ID = 0;
+FunctionPass *llvm::createWebAssemblyExplicitLocals() {
+ return new WebAssemblyExplicitLocals();
+}
+
+/// Return a local id number for the given register, assigning it a new one
+/// if it doesn't yet have one.
+static unsigned getLocalId(DenseMap<unsigned, unsigned> &Reg2Local,
+ unsigned &CurLocal, unsigned Reg) {
+ auto P = Reg2Local.insert(std::make_pair(Reg, CurLocal));
+ if (P.second)
+ ++CurLocal;
+ return P.first->second;
+}
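+
+// For example, with CurLocal == 3, the first query for a given register
+// returns 3 and advances CurLocal to 4; later queries for the same register
+// keep returning 3.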
+
+/// Get the appropriate drop opcode for the given register class.
+static unsigned getDropOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::DROP_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::DROP_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::DROP_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::DROP_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::DROP_V128;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the appropriate get_local opcode for the given register class.
+static unsigned getGetLocalOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::GET_LOCAL_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::GET_LOCAL_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::GET_LOCAL_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::GET_LOCAL_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::GET_LOCAL_V128;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the appropriate set_local opcode for the given register class.
+static unsigned getSetLocalOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::SET_LOCAL_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::SET_LOCAL_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::SET_LOCAL_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::SET_LOCAL_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::SET_LOCAL_V128;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the appropriate tee_local opcode for the given register class.
+static unsigned getTeeLocalOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::TEE_LOCAL_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::TEE_LOCAL_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::TEE_LOCAL_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::TEE_LOCAL_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::TEE_LOCAL_V128;
+ llvm_unreachable("Unexpected register class");
+}
+
+/// Get the type associated with the given register class.
+static MVT typeForRegClass(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return MVT::i32;
+ if (RC == &WebAssembly::I64RegClass)
+ return MVT::i64;
+ if (RC == &WebAssembly::F32RegClass)
+ return MVT::f32;
+ if (RC == &WebAssembly::F64RegClass)
+ return MVT::f64;
+ llvm_unreachable("unrecognized register class");
+}
+
+/// Given a MachineOperand of a stackified vreg, return the instruction at the
+/// start of the expression tree.
+static MachineInstr *FindStartOfTree(MachineOperand &MO,
+ MachineRegisterInfo &MRI,
+ WebAssemblyFunctionInfo &MFI) {
+ unsigned Reg = MO.getReg();
+ assert(MFI.isVRegStackified(Reg));
+ MachineInstr *Def = MRI.getVRegDef(Reg);
+
+ // Find the first stackified use and proceed from there.
+ for (MachineOperand &DefMO : Def->explicit_uses()) {
+ if (!DefMO.isReg())
+ continue;
+ return FindStartOfTree(DefMO, MRI, MFI);
+ }
+
+ // If there were no stackified uses, we've reached the start.
+ return Def;
+}
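+
+// For example, if MO is defined by an ADD whose first register operand is
+// itself stackified, the walk recurses into that operand's def, ultimately
+// returning the earliest instruction of the expression tree; get_locals for
+// the tree's leaves must be inserted before that point.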
+
+bool WebAssemblyExplicitLocals::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Make Locals Explicit **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ // Disable this pass if directed to do so.
+ if (DisableWebAssemblyExplicitLocals)
+ return false;
+
+ // Disable this pass if we aren't doing direct wasm object emission.
+ if (MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple().isOSBinFormatELF())
+ return false;
+
+ bool Changed = false;
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ // Map non-stackified virtual registers to their local ids.
+ DenseMap<unsigned, unsigned> Reg2Local;
+
+ // Handle ARGUMENTS first to ensure that they get the designated numbers.
+ for (MachineBasicBlock::iterator I = MF.begin()->begin(),
+ E = MF.begin()->end();
+ I != E;) {
+ MachineInstr &MI = *I++;
+ if (!WebAssembly::isArgument(MI))
+ break;
+ unsigned Reg = MI.getOperand(0).getReg();
+ assert(!MFI.isVRegStackified(Reg));
+ Reg2Local[Reg] = MI.getOperand(1).getImm();
+ MI.eraseFromParent();
+ Changed = true;
+ }
+
+ // Start assigning local numbers after the last parameter.
+ unsigned CurLocal = MFI.getParams().size();
+
+ // Precompute the set of registers that are unused, so that we can insert
+  // drops for their defs.
+ BitVector UseEmpty(MRI.getNumVirtRegs());
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i)
+ UseEmpty[i] = MRI.use_empty(TargetRegisterInfo::index2VirtReg(i));
+
+ // Visit each instruction in the function.
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
+ MachineInstr &MI = *I++;
+ assert(!WebAssembly::isArgument(MI));
+
+ if (MI.isDebugValue() || MI.isLabel())
+ continue;
+
+ // Replace tee instructions with tee_local. The difference is that tee
+      // instructions have two defs, while tee_local instructions have one def
+ // and an index of a local to write to.
+ if (WebAssembly::isTee(MI)) {
+ assert(MFI.isVRegStackified(MI.getOperand(0).getReg()));
+ assert(!MFI.isVRegStackified(MI.getOperand(1).getReg()));
+ unsigned OldReg = MI.getOperand(2).getReg();
+ const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
+
+ // Stackify the input if it isn't stackified yet.
+ if (!MFI.isVRegStackified(OldReg)) {
+ unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg);
+ unsigned NewReg = MRI.createVirtualRegister(RC);
+ unsigned Opc = getGetLocalOpcode(RC);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Opc), NewReg)
+ .addImm(LocalId);
+ MI.getOperand(2).setReg(NewReg);
+ MFI.stackifyVReg(NewReg);
+ }
+
+ // Replace the TEE with a TEE_LOCAL.
+ unsigned LocalId =
+ getLocalId(Reg2Local, CurLocal, MI.getOperand(1).getReg());
+ unsigned Opc = getTeeLocalOpcode(RC);
+ BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(Opc),
+ MI.getOperand(0).getReg())
+ .addImm(LocalId)
+ .addReg(MI.getOperand(2).getReg());
+
+ MI.eraseFromParent();
+ Changed = true;
+ continue;
+ }
+
+ // Insert set_locals for any defs that aren't stackified yet. Currently
+ // we handle at most one def.
+ assert(MI.getDesc().getNumDefs() <= 1);
+ if (MI.getDesc().getNumDefs() == 1) {
+ unsigned OldReg = MI.getOperand(0).getReg();
+ if (!MFI.isVRegStackified(OldReg)) {
+ const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
+ unsigned NewReg = MRI.createVirtualRegister(RC);
+ auto InsertPt = std::next(MachineBasicBlock::iterator(&MI));
+ if (MI.getOpcode() == WebAssembly::IMPLICIT_DEF) {
+ MI.eraseFromParent();
+ Changed = true;
+ continue;
+ }
+ if (UseEmpty[TargetRegisterInfo::virtReg2Index(OldReg)]) {
+ unsigned Opc = getDropOpcode(RC);
+ BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc))
+ .addReg(NewReg);
+ } else {
+ unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg);
+ unsigned Opc = getSetLocalOpcode(RC);
+ BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc))
+ .addImm(LocalId)
+ .addReg(NewReg);
+ }
+ MI.getOperand(0).setReg(NewReg);
+ MFI.stackifyVReg(NewReg);
+ Changed = true;
+ }
+ }
+
+ // Insert get_locals for any uses that aren't stackified yet.
+ MachineInstr *InsertPt = &MI;
+ for (MachineOperand &MO : reverse(MI.explicit_uses())) {
+ if (!MO.isReg())
+ continue;
+
+ unsigned OldReg = MO.getReg();
+
+ // Inline asm may have a def in the middle of the operands. Our contract
+ // with inline asm register operands is to provide local indices as
+ // immediates.
+ if (MO.isDef()) {
+ assert(MI.getOpcode() == TargetOpcode::INLINEASM);
+ unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg);
+ MRI.removeRegOperandFromUseList(&MO);
+ MO = MachineOperand::CreateImm(LocalId);
+ continue;
+ }
+
+ // If we see a stackified register, prepare to insert subsequent
+ // get_locals before the start of its tree.
+ if (MFI.isVRegStackified(OldReg)) {
+ InsertPt = FindStartOfTree(MO, MRI, MFI);
+ continue;
+ }
+
+ // Our contract with inline asm register operands is to provide local
+ // indices as immediates.
+ if (MI.getOpcode() == TargetOpcode::INLINEASM) {
+ unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg);
+ MRI.removeRegOperandFromUseList(&MO);
+ MO = MachineOperand::CreateImm(LocalId);
+ continue;
+ }
+
+ // Insert a get_local.
+ unsigned LocalId = getLocalId(Reg2Local, CurLocal, OldReg);
+ const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
+ unsigned NewReg = MRI.createVirtualRegister(RC);
+ unsigned Opc = getGetLocalOpcode(RC);
+ InsertPt =
+ BuildMI(MBB, InsertPt, MI.getDebugLoc(), TII->get(Opc), NewReg)
+ .addImm(LocalId);
+ MO.setReg(NewReg);
+ MFI.stackifyVReg(NewReg);
+ Changed = true;
+ }
+
+ // Coalesce and eliminate COPY instructions.
+ if (WebAssembly::isCopy(MI)) {
+ MRI.replaceRegWith(MI.getOperand(1).getReg(),
+ MI.getOperand(0).getReg());
+ MI.eraseFromParent();
+ Changed = true;
+ }
+ }
+ }
+
+ // Define the locals.
+ // TODO: Sort the locals for better compression.
+ MFI.setNumLocals(CurLocal - MFI.getParams().size());
+ for (size_t i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ auto I = Reg2Local.find(Reg);
+ if (I == Reg2Local.end() || I->second < MFI.getParams().size())
+ continue;
+
+ MFI.setLocal(I->second - MFI.getParams().size(),
+ typeForRegClass(MRI.getRegClass(Reg)));
+ Changed = true;
+ }
+
+#ifndef NDEBUG
+ // Assert that all registers have been stackified at this point.
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ if (MI.isDebugValue() || MI.isLabel())
+ continue;
+ for (const MachineOperand &MO : MI.explicit_operands()) {
+ assert(
+ (!MO.isReg() || MRI.use_empty(MO.getReg()) ||
+ MFI.isVRegStackified(MO.getReg())) &&
+ "WebAssemblyExplicitLocals failed to stackify a register operand");
+ }
+ }
+ }
+#endif
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
new file mode 100644
index 000000000000..7e284ea950fd
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -0,0 +1,1328 @@
+//===-- WebAssemblyFastISel.cpp - WebAssembly FastISel implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the WebAssembly-specific support for the FastISel
+/// class. Some of the target-specific code is generated by tablegen in the file
+/// WebAssemblyGenFastISel.inc, which is #included here.
+///
+/// TODO: kill flags
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/CodeGen/FastISel.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fastisel"
+
+namespace {
+
+class WebAssemblyFastISel final : public FastISel {
+ // All possible address modes.
+ class Address {
+ public:
+ typedef enum { RegBase, FrameIndexBase } BaseKind;
+
+ private:
+ BaseKind Kind;
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ int64_t Offset;
+
+ const GlobalValue *GV;
+
+ public:
+ // Innocuous defaults for our address.
+    Address() : Kind(RegBase), Offset(0), GV(nullptr) { Base.Reg = 0; }
+ void setKind(BaseKind K) {
+ assert(!isSet() && "Can't change kind with non-zero base");
+ Kind = K;
+ }
+ BaseKind getKind() const { return Kind; }
+ bool isRegBase() const { return Kind == RegBase; }
+ bool isFIBase() const { return Kind == FrameIndexBase; }
+ void setReg(unsigned Reg) {
+ assert(isRegBase() && "Invalid base register access!");
+ assert(Base.Reg == 0 && "Overwriting non-zero register");
+ Base.Reg = Reg;
+ }
+ unsigned getReg() const {
+ assert(isRegBase() && "Invalid base register access!");
+ return Base.Reg;
+ }
+ void setFI(unsigned FI) {
+ assert(isFIBase() && "Invalid base frame index access!");
+ assert(Base.FI == 0 && "Overwriting non-zero frame index");
+ Base.FI = FI;
+ }
+ unsigned getFI() const {
+ assert(isFIBase() && "Invalid base frame index access!");
+ return Base.FI;
+ }
+
+ void setOffset(int64_t Offset_) {
+ assert(Offset_ >= 0 && "Offsets must be non-negative");
+ Offset = Offset_;
+ }
+ int64_t getOffset() const { return Offset; }
+ void setGlobalValue(const GlobalValue *G) { GV = G; }
+ const GlobalValue *getGlobalValue() const { return GV; }
+ bool isSet() const {
+ if (isRegBase()) {
+ return Base.Reg != 0;
+ } else {
+ return Base.FI != 0;
+ }
+ }
+ };
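+
+  // An Address is thus one of (illustrative):
+  //   RegBase:        base register + non-negative offset [+ global]
+  //   FrameIndexBase: frame index   + non-negative offset [+ global]
+  // mirroring wasm's base-operand-plus-unsigned-constant-offset addressing.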
+
+ /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
+ /// right decision when generating code for different targets.
+ const WebAssemblySubtarget *Subtarget;
+ LLVMContext *Context;
+
+private:
+ // Utility helper routines
+ MVT::SimpleValueType getSimpleType(Type *Ty) {
+ EVT VT = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
+ return VT.isSimple() ? VT.getSimpleVT().SimpleTy :
+ MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ MVT::SimpleValueType getLegalType(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ return MVT::i32;
+ case MVT::i32:
+ case MVT::i64:
+ case MVT::f32:
+ case MVT::f64:
+ return VT;
+ case MVT::f16:
+ return MVT::f32;
+ case MVT::v16i8:
+ case MVT::v8i16:
+ case MVT::v4i32:
+ case MVT::v4f32:
+ if (Subtarget->hasSIMD128())
+ return VT;
+ break;
+ default:
+ break;
+ }
+ return MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ bool computeAddress(const Value *Obj, Address &Addr);
+ void materializeLoadStoreOperands(Address &Addr);
+ void addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO);
+ unsigned maskI1Value(unsigned Reg, const Value *V);
+ unsigned getRegForI1Value(const Value *V, bool &Not);
+ unsigned zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned getRegForUnsignedValue(const Value *V);
+ unsigned getRegForSignedValue(const Value *V);
+ unsigned getRegForPromotedValue(const Value *V, bool IsSigned);
+ unsigned notValue(unsigned Reg);
+ unsigned copyValue(unsigned Reg);
+
+ // Backend specific FastISel code.
+ unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned fastMaterializeConstant(const Constant *C) override;
+ bool fastLowerArguments() override;
+
+ // Selection routines.
+ bool selectCall(const Instruction *I);
+ bool selectSelect(const Instruction *I);
+ bool selectTrunc(const Instruction *I);
+ bool selectZExt(const Instruction *I);
+ bool selectSExt(const Instruction *I);
+ bool selectICmp(const Instruction *I);
+ bool selectFCmp(const Instruction *I);
+ bool selectBitCast(const Instruction *I);
+ bool selectLoad(const Instruction *I);
+ bool selectStore(const Instruction *I);
+ bool selectBr(const Instruction *I);
+ bool selectRet(const Instruction *I);
+ bool selectUnreachable(const Instruction *I);
+
+public:
+ // Backend specific FastISel code.
+ WebAssemblyFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo)
+ : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
+ Subtarget = &FuncInfo.MF->getSubtarget<WebAssemblySubtarget>();
+ Context = &FuncInfo.Fn->getContext();
+ }
+
+ bool fastSelectInstruction(const Instruction *I) override;
+
+#include "WebAssemblyGenFastISel.inc"
+};
+
+} // end anonymous namespace
+
+bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
+
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+    // another block; otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
+ if (Addr.getGlobalValue())
+ return false;
+ Addr.setGlobalValue(GV);
+ return true;
+ }
+
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return computeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
+ TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ uint64_t TmpOffset = Addr.getOffset();
+ // Non-inbounds geps can wrap; wasm's offsets can't.
+ if (!cast<GEPOperator>(U)->isInBounds())
+ goto unsupported_gep;
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
+ GTI != E; ++GTI) {
+ const Value *Op = GTI.getOperand();
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
+ // An unscaled add of a register. Set it as the new base.
+ unsigned Reg = getRegForValue(Op);
+ if (Reg == 0)
+ return false;
+ Addr.setReg(Reg);
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+ // Don't fold in negative offsets.
+ if (int64_t(TmpOffset) >= 0) {
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (computeAddress(U->getOperand(0), Addr))
+ return true;
+ }
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ if (Addr.isSet()) {
+ return false;
+ }
+ Addr.setKind(Address::FrameIndexBase);
+ Addr.setFI(SI->second);
+ return true;
+ }
+ break;
+ }
+ case Instruction::Add: {
+ // Adds of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (isa<ConstantInt>(LHS))
+ std::swap(LHS, RHS);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ uint64_t TmpOffset = Addr.getOffset() + CI->getSExtValue();
+ if (int64_t(TmpOffset) >= 0) {
+ Addr.setOffset(TmpOffset);
+ return computeAddress(LHS, Addr);
+ }
+ }
+
+ Address Backup = Addr;
+ if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
+ return true;
+ Addr = Backup;
+
+ break;
+ }
+ case Instruction::Sub: {
+ // Subs of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ int64_t TmpOffset = Addr.getOffset() - CI->getSExtValue();
+ if (TmpOffset >= 0) {
+ Addr.setOffset(TmpOffset);
+ return computeAddress(LHS, Addr);
+ }
+ }
+ break;
+ }
+ }
+ if (Addr.isSet()) {
+ return false;
+ }
+ unsigned Reg = getRegForValue(Obj);
+ if (Reg == 0)
+ return false;
+ Addr.setReg(Reg);
+ return Addr.getReg() != 0;
+}
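+
+// For example, for "getelementptr inbounds %struct.S, %struct.S* %p, i32 0,
+// i32 2", the struct-field offset folds into Addr's constant offset and %p
+// becomes the base register, so no explicit add needs to be emitted.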
+
+void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) {
+ if (Addr.isRegBase()) {
+ unsigned Reg = Addr.getReg();
+ if (Reg == 0) {
+ Reg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::CONST_I64 :
+ WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg)
+ .addImm(0);
+ Addr.setReg(Reg);
+ }
+ }
+}
+
+void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
+ const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO) {
+ // Set the alignment operand (this is rewritten in SetP2AlignOperands).
+ // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
+ MIB.addImm(0);
+
+ if (const GlobalValue *GV = Addr.getGlobalValue())
+ MIB.addGlobalAddress(GV, Addr.getOffset());
+ else
+ MIB.addImm(Addr.getOffset());
+
+ if (Addr.isRegBase())
+ MIB.addReg(Addr.getReg());
+ else
+ MIB.addFrameIndex(Addr.getFI());
+
+ MIB.addMemOperand(MMO);
+}
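+
+// The resulting load/store operand order is: p2align immediate (0 here,
+// fixed up later), offset (global address or immediate), base (register or
+// frame index), then the memory operand.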
+
+unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
+ return zeroExtendToI32(Reg, V, MVT::i1);
+}
+
+unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V, bool &Not) {
+ if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(V))
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1)))
+ if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32)) {
+ Not = ICmp->isTrueWhenEqual();
+ return getRegForValue(ICmp->getOperand(0));
+ }
+
+ if (BinaryOperator::isNot(V)) {
+ Not = true;
+ return getRegForValue(BinaryOperator::getNotArgument(V));
+ }
+
+ Not = false;
+ unsigned Reg = getRegForValue(V);
+ if (Reg == 0)
+ return 0;
+ return maskI1Value(Reg, V);
+}
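+
+// For example, for "icmp eq i32 %x, 0" this returns the register for %x
+// directly and sets Not, letting callers invert the branch or swap select
+// operands instead of materializing an extra comparison.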
+
+unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ if (Reg == 0)
+ return 0;
+
+ switch (From) {
+ case MVT::i1:
+ // If the value is naturally an i1, we don't need to mask it.
+ // TODO: Recursively examine selects, phis, and, or, xor, constants.
+    if (V != nullptr) {
+      if (isa<CmpInst>(V) ||
+          (isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr()))
+        return copyValue(Reg);
+    }
+    LLVM_FALLTHROUGH;
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
+ unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
+
+ unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::AND_I32), Result)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ return Result;
+}
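+
+// For example, zero-extending an i8 materializes CONST_I32 255 and ANDs it
+// with the input; i16 uses 65535, and i1 uses 1 when masking is needed.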
+
+unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ if (Reg == 0)
+ return 0;
+
+ switch (From) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
+ unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(32 - MVT(From).getSizeInBits());
+
+ unsigned Left = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::SHL_I32), Left)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ unsigned Right = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::SHR_S_I32), Right)
+ .addReg(Left)
+ .addReg(Imm);
+
+ return Right;
+}
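+
+// For example, sign-extending an i8 shifts left by 24 (32 - 8) with SHL_I32
+// and then arithmetic-shifts right by 24 with SHR_S_I32 to replicate the
+// sign bit.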
+
+unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = zeroExtendToI32(Reg, V, From);
+
+ unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ return zeroExtendToI32(Reg, V, From);
+}
+
+unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = signExtendToI32(Reg, V, From);
+
+ unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ return signExtendToI32(Reg, V, From);
+}
+
+unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ unsigned VReg = getRegForValue(V);
+ if (VReg == 0)
+ return 0;
+ return zeroExtend(VReg, V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ unsigned VReg = getRegForValue(V);
+ if (VReg == 0)
+ return 0;
+ return signExtend(VReg, V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
+ bool IsSigned) {
+ return IsSigned ? getRegForSignedValue(V) :
+ getRegForUnsignedValue(V);
+}
+
+unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
+ assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
+
+ unsigned NotReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::EQZ_I32), NotReg)
+ .addReg(Reg);
+ return NotReg;
+}
+
+unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
+ unsigned ResultReg = createResultReg(MRI.getRegClass(Reg));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::COPY), ResultReg)
+ .addReg(Reg);
+ return ResultReg;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::COPY_I64 :
+ WebAssembly::COPY_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addFrameIndex(SI->second);
+ return ResultReg;
+ }
+
+ return 0;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::CONST_I64 :
+ WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addGlobalAddress(GV);
+ return ResultReg;
+ }
+
+ // Let target-independent code handle it.
+ return 0;
+}
+
+bool WebAssemblyFastISel::fastLowerArguments() {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const Function *F = FuncInfo.Fn;
+ if (F->isVarArg())
+ return false;
+
+ unsigned i = 0;
+ for (auto const &Arg : F->args()) {
+ const AttributeList &Attrs = F->getAttributes();
+ if (Attrs.hasParamAttribute(i, Attribute::ByVal) ||
+ Attrs.hasParamAttribute(i, Attribute::SwiftSelf) ||
+ Attrs.hasParamAttribute(i, Attribute::SwiftError) ||
+ Attrs.hasParamAttribute(i, Attribute::InAlloca) ||
+ Attrs.hasParamAttribute(i, Attribute::Nest))
+ return false;
+
+ Type *ArgTy = Arg.getType();
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy())
+ return false;
+ if (!Subtarget->hasSIMD128() && ArgTy->isVectorTy())
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(ArgTy)) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::ARGUMENT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::ARGUMENT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::ARGUMENT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::ARGUMENT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ case MVT::v16i8:
+ Opc = WebAssembly::ARGUMENT_v16i8;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v8i16:
+ Opc = WebAssembly::ARGUMENT_v8i16;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v4i32:
+ Opc = WebAssembly::ARGUMENT_v4i32;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ case MVT::v4f32:
+ Opc = WebAssembly::ARGUMENT_v4f32;
+ RC = &WebAssembly::V128RegClass;
+ break;
+ default:
+ return false;
+ }
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addImm(i);
+ updateValueMap(&Arg, ResultReg);
+
+ ++i;
+ }
+
+ MRI.addLiveIn(WebAssembly::ARGUMENTS);
+
+ auto *MFI = MF->getInfo<WebAssemblyFunctionInfo>();
+ for (auto const &Arg : F->args())
+ MFI->addParam(getLegalType(getSimpleType(Arg.getType())));
+
+ if (!F->getReturnType()->isVoidTy())
+ MFI->addResult(getLegalType(getSimpleType(F->getReturnType())));
+
+ return true;
+}
+
+bool WebAssemblyFastISel::selectCall(const Instruction *I) {
+ const CallInst *Call = cast<CallInst>(I);
+
+ if (Call->isMustTailCall() || Call->isInlineAsm() ||
+ Call->getFunctionType()->isVarArg())
+ return false;
+
+ Function *Func = Call->getCalledFunction();
+ if (Func && Func->isIntrinsic())
+ return false;
+
+ bool IsDirect = Func != nullptr;
+ if (!IsDirect && isa<ConstantExpr>(Call->getCalledValue()))
+ return false;
+
+ FunctionType *FuncTy = Call->getFunctionType();
+ unsigned Opc;
+ bool IsVoid = FuncTy->getReturnType()->isVoidTy();
+ unsigned ResultReg;
+ if (IsVoid) {
+ Opc = IsDirect ? WebAssembly::CALL_VOID : WebAssembly::PCALL_INDIRECT_VOID;
+ } else {
+ if (!Subtarget->hasSIMD128() && Call->getType()->isVectorTy())
+ return false;
+
+ MVT::SimpleValueType RetTy = getSimpleType(Call->getType());
+ switch (RetTy) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = IsDirect ? WebAssembly::CALL_I32 : WebAssembly::PCALL_INDIRECT_I32;
+ ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ break;
+ case MVT::i64:
+ Opc = IsDirect ? WebAssembly::CALL_I64 : WebAssembly::PCALL_INDIRECT_I64;
+ ResultReg = createResultReg(&WebAssembly::I64RegClass);
+ break;
+ case MVT::f32:
+ Opc = IsDirect ? WebAssembly::CALL_F32 : WebAssembly::PCALL_INDIRECT_F32;
+ ResultReg = createResultReg(&WebAssembly::F32RegClass);
+ break;
+ case MVT::f64:
+ Opc = IsDirect ? WebAssembly::CALL_F64 : WebAssembly::PCALL_INDIRECT_F64;
+ ResultReg = createResultReg(&WebAssembly::F64RegClass);
+ break;
+ case MVT::v16i8:
+ Opc =
+ IsDirect ? WebAssembly::CALL_v16i8 : WebAssembly::PCALL_INDIRECT_v16i8;
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v8i16:
+ Opc =
+ IsDirect ? WebAssembly::CALL_v8i16 : WebAssembly::PCALL_INDIRECT_v8i16;
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v4i32:
+ Opc =
+ IsDirect ? WebAssembly::CALL_v4i32 : WebAssembly::PCALL_INDIRECT_v4i32;
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ case MVT::v4f32:
+ Opc =
+ IsDirect ? WebAssembly::CALL_v4f32 : WebAssembly::PCALL_INDIRECT_v4f32;
+ ResultReg = createResultReg(&WebAssembly::V128RegClass);
+ break;
+ default:
+ return false;
+ }
+ }
+
+ SmallVector<unsigned, 8> Args;
+ for (unsigned i = 0, e = Call->getNumArgOperands(); i < e; ++i) {
+ Value *V = Call->getArgOperand(i);
+ MVT::SimpleValueType ArgTy = getSimpleType(V->getType());
+ if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+ return false;
+
+ const AttributeList &Attrs = Call->getAttributes();
+ if (Attrs.hasParamAttribute(i, Attribute::ByVal) ||
+ Attrs.hasParamAttribute(i, Attribute::SwiftSelf) ||
+ Attrs.hasParamAttribute(i, Attribute::SwiftError) ||
+ Attrs.hasParamAttribute(i, Attribute::InAlloca) ||
+ Attrs.hasParamAttribute(i, Attribute::Nest))
+ return false;
+
+ unsigned Reg;
+
+ if (Attrs.hasParamAttribute(i, Attribute::SExt))
+ Reg = getRegForSignedValue(V);
+ else if (Attrs.hasParamAttribute(i, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(V);
+ else
+ Reg = getRegForValue(V);
+
+ if (Reg == 0)
+ return false;
+
+ Args.push_back(Reg);
+ }
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+
+ if (!IsVoid)
+ MIB.addReg(ResultReg, RegState::Define);
+
+ if (IsDirect)
+ MIB.addGlobalAddress(Func);
+ else {
+ unsigned Reg = getRegForValue(Call->getCalledValue());
+ if (Reg == 0)
+ return false;
+ MIB.addReg(Reg);
+ }
+
+ for (unsigned ArgReg : Args)
+ MIB.addReg(ArgReg);
+
+ if (!IsVoid)
+ updateValueMap(Call, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
+ const SelectInst *Select = cast<SelectInst>(I);
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Select->getCondition(), Not);
+ if (CondReg == 0)
+ return false;
+
+ unsigned TrueReg = getRegForValue(Select->getTrueValue());
+ if (TrueReg == 0)
+ return false;
+
+ unsigned FalseReg = getRegForValue(Select->getFalseValue());
+ if (FalseReg == 0)
+ return false;
+
+ if (Not)
+ std::swap(TrueReg, FalseReg);
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Select->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::SELECT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::SELECT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::SELECT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::SELECT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(TrueReg)
+ .addReg(FalseReg)
+ .addReg(CondReg);
+
+ updateValueMap(Select, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
+ const TruncInst *Trunc = cast<TruncInst>(I);
+
+ unsigned Reg = getRegForValue(Trunc->getOperand(0));
+ if (Reg == 0)
+ return false;
+
+ if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) {
+ unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I32_WRAP_I64), Result)
+ .addReg(Reg);
+ Reg = Result;
+ }
+
+ updateValueMap(Trunc, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
+ const ZExtInst *ZExt = cast<ZExtInst>(I);
+
+ const Value *Op = ZExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
+ unsigned In = getRegForValue(Op);
+ if (In == 0)
+ return false;
+ unsigned Reg = zeroExtend(In, Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(ZExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
+ const SExtInst *SExt = cast<SExtInst>(I);
+
+ const Value *Op = SExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
+ unsigned In = getRegForValue(Op);
+ if (In == 0)
+ return false;
+ unsigned Reg = signExtend(In, Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(SExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
+ const ICmpInst *ICmp = cast<ICmpInst>(I);
+
+ bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
+ unsigned Opc;
+ bool isSigned = false;
+ switch (ICmp->getPredicate()) {
+ case ICmpInst::ICMP_EQ:
+ Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
+ break;
+ case ICmpInst::ICMP_NE:
+ Opc = I32 ? WebAssembly::NE_I32 : WebAssembly::NE_I64;
+ break;
+ case ICmpInst::ICMP_UGT:
+ Opc = I32 ? WebAssembly::GT_U_I32 : WebAssembly::GT_U_I64;
+ break;
+ case ICmpInst::ICMP_UGE:
+ Opc = I32 ? WebAssembly::GE_U_I32 : WebAssembly::GE_U_I64;
+ break;
+ case ICmpInst::ICMP_ULT:
+ Opc = I32 ? WebAssembly::LT_U_I32 : WebAssembly::LT_U_I64;
+ break;
+ case ICmpInst::ICMP_ULE:
+ Opc = I32 ? WebAssembly::LE_U_I32 : WebAssembly::LE_U_I64;
+ break;
+ case ICmpInst::ICMP_SGT:
+ Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SGE:
+ Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SLT:
+ Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SLE:
+ Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
+ isSigned = true;
+ break;
+ default: return false;
+ }
+
+ unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), isSigned);
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), isSigned);
+ if (RHS == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+ updateValueMap(ICmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
+ const FCmpInst *FCmp = cast<FCmpInst>(I);
+
+ unsigned LHS = getRegForValue(FCmp->getOperand(0));
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForValue(FCmp->getOperand(1));
+ if (RHS == 0)
+ return false;
+
+ bool F32 = getSimpleType(FCmp->getOperand(0)->getType()) != MVT::f64;
+ unsigned Opc;
+ bool Not = false;
+ switch (FCmp->getPredicate()) {
+ case FCmpInst::FCMP_OEQ:
+ Opc = F32 ? WebAssembly::EQ_F32 : WebAssembly::EQ_F64;
+ break;
+ case FCmpInst::FCMP_UNE:
+ Opc = F32 ? WebAssembly::NE_F32 : WebAssembly::NE_F64;
+ break;
+ case FCmpInst::FCMP_OGT:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ break;
+ case FCmpInst::FCMP_OGE:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ break;
+ case FCmpInst::FCMP_OLT:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ break;
+ case FCmpInst::FCMP_OLE:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ break;
+ case FCmpInst::FCMP_UGT:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_UGE:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULT:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULE:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ Not = true;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+
+ if (Not)
+ ResultReg = notValue(ResultReg);
+
+ updateValueMap(FCmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
+ // Target-independent code can handle this, except it doesn't set the dead
+ // flag on the ARGUMENTS clobber, so we have to do that manually in order
+ // to satisfy code that expects this of isBitcast() instructions.
+ EVT VT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT RetVT = TLI.getValueType(DL, I->getType());
+ if (!VT.isSimple() || !RetVT.isSimple())
+ return false;
+
+ unsigned In = getRegForValue(I->getOperand(0));
+ if (In == 0)
+ return false;
+
+ if (VT == RetVT) {
+ // No-op bitcast.
+ updateValueMap(I, In);
+ return true;
+ }
+
+ unsigned Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
+ In, I->getOperand(0)->hasOneUse());
+ if (!Reg)
+ return false;
+ MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
+ --Iter;
+ assert(Iter->isBitcast());
+ Iter->setPhysRegsDeadExcept(ArrayRef<unsigned>(), TRI);
+ updateValueMap(I, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
+ const LoadInst *Load = cast<LoadInst>(I);
+ if (Load->isAtomic())
+ return false;
+ if (!Subtarget->hasSIMD128() && Load->getType()->isVectorTy())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Load->getPointerOperand(), Addr))
+ return false;
+
+ // TODO: Fold a following sign-/zero-extend into the load instruction.
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Load->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ Opc = WebAssembly::LOAD8_U_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i16:
+ Opc = WebAssembly::LOAD16_U_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i32:
+ Opc = WebAssembly::LOAD_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::LOAD_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::LOAD_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::LOAD_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ unsigned ResultReg = createResultReg(RC);
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load));
+
+ updateValueMap(Load, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectStore(const Instruction *I) {
+ const StoreInst *Store = cast<StoreInst>(I);
+ if (Store->isAtomic())
+ return false;
+ if (!Subtarget->hasSIMD128() &&
+ Store->getValueOperand()->getType()->isVectorTy())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Store->getPointerOperand(), Addr))
+ return false;
+
+ unsigned Opc;
+ bool VTIsi1 = false;
+ switch (getSimpleType(Store->getValueOperand()->getType())) {
+ case MVT::i1:
+ VTIsi1 = true;
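+    // Fall through to the i8 case; the value is masked via maskI1Value below.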
+ case MVT::i8:
+ Opc = WebAssembly::STORE8_I32;
+ break;
+ case MVT::i16:
+ Opc = WebAssembly::STORE16_I32;
+ break;
+ case MVT::i32:
+ Opc = WebAssembly::STORE_I32;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::STORE_I64;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::STORE_F32;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::STORE_F64;
+ break;
+ default: return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ unsigned ValueReg = getRegForValue(Store->getValueOperand());
+ if (ValueReg == 0)
+ return false;
+ if (VTIsi1)
+ ValueReg = maskI1Value(ValueReg, Store->getValueOperand());
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store));
+
+ MIB.addReg(ValueReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBr(const Instruction *I) {
+ const BranchInst *Br = cast<BranchInst>(I);
+ if (Br->isUnconditional()) {
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ fastEmitBranch(MSucc, Br->getDebugLoc());
+ return true;
+ }
+
+ MachineBasicBlock *TBB = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ MachineBasicBlock *FBB = FuncInfo.MBBMap[Br->getSuccessor(1)];
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Br->getCondition(), Not);
+ if (CondReg == 0)
+ return false;
+
+ unsigned Opc = WebAssembly::BR_IF;
+ if (Not)
+ Opc = WebAssembly::BR_UNLESS;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addMBB(TBB)
+ .addReg(CondReg);
+
+ finishCondBranch(Br->getParent(), TBB, FBB);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectRet(const Instruction *I) {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+
+ if (Ret->getNumOperands() == 0) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::RETURN_VOID));
+ return true;
+ }
+
+ Value *RV = Ret->getOperand(0);
+ if (!Subtarget->hasSIMD128() && RV->getType()->isVectorTy())
+ return false;
+
+ unsigned Opc;
+ switch (getSimpleType(RV->getType())) {
+ case MVT::i1: case MVT::i8:
+ case MVT::i16: case MVT::i32:
+ Opc = WebAssembly::RETURN_I32;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::RETURN_I64;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::RETURN_F32;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::RETURN_F64;
+ break;
+ case MVT::v16i8:
+ Opc = WebAssembly::RETURN_v16i8;
+ break;
+ case MVT::v8i16:
+ Opc = WebAssembly::RETURN_v8i16;
+ break;
+ case MVT::v4i32:
+ Opc = WebAssembly::RETURN_v4i32;
+ break;
+ case MVT::v4f32:
+ Opc = WebAssembly::RETURN_v4f32;
+ break;
+ default: return false;
+ }
+
+ unsigned Reg;
+ if (FuncInfo.Fn->getAttributes().hasAttribute(0, Attribute::SExt))
+ Reg = getRegForSignedValue(RV);
+ else if (FuncInfo.Fn->getAttributes().hasAttribute(0, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(RV);
+ else
+ Reg = getRegForValue(RV);
+
+ if (Reg == 0)
+ return false;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)).addReg(Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::UNREACHABLE));
+ return true;
+}
+
+bool WebAssemblyFastISel::fastSelectInstruction(const Instruction *I) {
+ switch (I->getOpcode()) {
+ case Instruction::Call:
+ if (selectCall(I))
+ return true;
+ break;
+ case Instruction::Select: return selectSelect(I);
+ case Instruction::Trunc: return selectTrunc(I);
+ case Instruction::ZExt: return selectZExt(I);
+ case Instruction::SExt: return selectSExt(I);
+ case Instruction::ICmp: return selectICmp(I);
+ case Instruction::FCmp: return selectFCmp(I);
+ case Instruction::BitCast: return selectBitCast(I);
+ case Instruction::Load: return selectLoad(I);
+ case Instruction::Store: return selectStore(I);
+ case Instruction::Br: return selectBr(I);
+ case Instruction::Ret: return selectRet(I);
+ case Instruction::Unreachable: return selectUnreachable(I);
+ default: break;
+ }
+
+ // Fall back to target-independent instruction selection.
+ return selectOperator(I, I->getOpcode());
+}
+
+FastISel *WebAssembly::createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) {
+ return new WebAssemblyFastISel(FuncInfo, LibInfo);
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
new file mode 100644
index 000000000000..666337acccce
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp
@@ -0,0 +1,234 @@
+//===-- WebAssemblyFixFunctionBitcasts.cpp - Fix function bitcasts --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Fix bitcasted functions.
+///
+/// WebAssembly requires caller and callee signatures to match exactly, but in
+/// LLVM some amount of slop is permitted. Detect mismatches by looking for
+/// bitcasts of functions and rewrite them to use wrapper functions instead.
+///
+/// This doesn't catch all cases, such as when a function's address is taken in
+/// one place and cast in another, but it works for many common cases.
+///
+/// Note that LLVM already optimizes away function bitcasts in common cases by
+/// dropping arguments as needed, so this pass only ends up getting used in less
+/// common cases.
+///
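+/// As a hypothetical illustration (not taken from any particular module), a
+/// call site such as
+///
+///   call void bitcast (i32 (i32)* @f to void ()*)()
+///
+/// is rewritten to call a private wrapper that invokes @f with an undef i32
+/// argument and discards the i32 result.
+///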
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fix-function-bitcasts"
+
+static cl::opt<bool> TemporaryWorkarounds(
+ "wasm-temporary-workarounds",
+ cl::desc("Apply certain temporary workarounds"),
+ cl::init(true), cl::Hidden);
+
+namespace {
+class FixFunctionBitcasts final : public ModulePass {
+ StringRef getPassName() const override {
+ return "WebAssembly Fix Function Bitcasts";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ ModulePass::getAnalysisUsage(AU);
+ }
+
+ bool runOnModule(Module &M) override;
+
+public:
+ static char ID;
+ FixFunctionBitcasts() : ModulePass(ID) {}
+};
+} // End anonymous namespace
+
+char FixFunctionBitcasts::ID = 0;
+ModulePass *llvm::createWebAssemblyFixFunctionBitcasts() {
+ return new FixFunctionBitcasts();
+}
+
+// Recursively descend the def-use lists from V to find non-bitcast users of
+// bitcasts of V.
+static void FindUses(Value *V, Function &F,
+ SmallVectorImpl<std::pair<Use *, Function *>> &Uses,
+ SmallPtrSetImpl<Constant *> &ConstantBCs) {
+ for (Use &U : V->uses()) {
+ if (BitCastOperator *BC = dyn_cast<BitCastOperator>(U.getUser()))
+ FindUses(BC, F, Uses, ConstantBCs);
+ else if (U.get()->getType() != F.getType()) {
+ CallSite CS(U.getUser());
+ if (!CS)
+        // Skip uses whose user isn't a call site
+ continue;
+ Value *Callee = CS.getCalledValue();
+ if (Callee != V)
+ // Skip calls where the function isn't the callee
+ continue;
+ if (isa<Constant>(U.get())) {
+ // Only add constant bitcasts to the list once; they get RAUW'd
+ auto c = ConstantBCs.insert(cast<Constant>(U.get()));
+ if (!c.second)
+ continue;
+ }
+ Uses.push_back(std::make_pair(&U, &F));
+ }
+ }
+}
+
+// Create a wrapper function with type Ty that calls F (which may have a
+// different type). Attempt to support common bitcasted function idioms:
+// - Call with more arguments than needed: arguments are dropped
+// - Call with fewer arguments than needed: arguments are filled in with undef
+// - Return value is not needed: drop it
+// - Return value needed but not present: supply an undef
+//
+// For now, return nullptr without creating a wrapper if the wrapper cannot
+// be generated due to incompatible types.
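+//
+// For example (hypothetical signatures, for illustration only): wrapping
+// "i32 f(i32, i32)" in a wrapper of type "i32 (i32)" passes the wrapper's one
+// argument plus an undef i32, while wrapping it as "void (i32, i32, i32)"
+// passes the first two arguments, drops the third, and discards the result.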
+static Function *CreateWrapper(Function *F, FunctionType *Ty) {
+ Module *M = F->getParent();
+
+ Function *Wrapper =
+ Function::Create(Ty, Function::PrivateLinkage, "bitcast", M);
+ BasicBlock *BB = BasicBlock::Create(M->getContext(), "body", Wrapper);
+
+ // Determine what arguments to pass.
+ SmallVector<Value *, 4> Args;
+ Function::arg_iterator AI = Wrapper->arg_begin();
+ Function::arg_iterator AE = Wrapper->arg_end();
+ FunctionType::param_iterator PI = F->getFunctionType()->param_begin();
+ FunctionType::param_iterator PE = F->getFunctionType()->param_end();
+ for (; AI != AE && PI != PE; ++AI, ++PI) {
+ if (AI->getType() != *PI) {
+ Wrapper->eraseFromParent();
+ return nullptr;
+ }
+ Args.push_back(&*AI);
+ }
+ for (; PI != PE; ++PI)
+ Args.push_back(UndefValue::get(*PI));
+ if (F->isVarArg())
+ for (; AI != AE; ++AI)
+ Args.push_back(&*AI);
+
+ CallInst *Call = CallInst::Create(F, Args, "", BB);
+
+ // Determine what value to return.
+ if (Ty->getReturnType()->isVoidTy())
+ ReturnInst::Create(M->getContext(), BB);
+ else if (F->getFunctionType()->getReturnType()->isVoidTy())
+ ReturnInst::Create(M->getContext(), UndefValue::get(Ty->getReturnType()),
+ BB);
+ else if (F->getFunctionType()->getReturnType() == Ty->getReturnType())
+ ReturnInst::Create(M->getContext(), Call, BB);
+ else {
+ Wrapper->eraseFromParent();
+ return nullptr;
+ }
+
+ return Wrapper;
+}
+
+bool FixFunctionBitcasts::runOnModule(Module &M) {
+ Function *Main = nullptr;
+ CallInst *CallMain = nullptr;
+ SmallVector<std::pair<Use *, Function *>, 0> Uses;
+ SmallPtrSet<Constant *, 2> ConstantBCs;
+
+ // Collect all the places that need wrappers.
+ for (Function &F : M) {
+ FindUses(&F, F, Uses, ConstantBCs);
+
+ // If we have a "main" function, and its type isn't
+ // "int main(int argc, char *argv[])", create an artificial call with it
+ // bitcasted to that type so that we generate a wrapper for it, so that
+ // the C runtime can call it.
+ if (!TemporaryWorkarounds && !F.isDeclaration() && F.getName() == "main") {
+ Main = &F;
+ LLVMContext &C = M.getContext();
+      Type *MainArgTys[] = {
+        Type::getInt32Ty(C),                        // argc
+        PointerType::get(Type::getInt8PtrTy(C), 0)  // argv
+      };
+ FunctionType *MainTy = FunctionType::get(Type::getInt32Ty(C), MainArgTys,
+ /*isVarArg=*/false);
+ if (F.getFunctionType() != MainTy) {
+ Value *Args[] = {
+ UndefValue::get(MainArgTys[0]),
+ UndefValue::get(MainArgTys[1])
+ };
+ Value *Casted = ConstantExpr::getBitCast(Main,
+ PointerType::get(MainTy, 0));
+ CallMain = CallInst::Create(Casted, Args, "call_main");
+ Use *UseMain = &CallMain->getOperandUse(2);
+ Uses.push_back(std::make_pair(UseMain, &F));
+ }
+ }
+ }
+
+ DenseMap<std::pair<Function *, FunctionType *>, Function *> Wrappers;
+
+ for (auto &UseFunc : Uses) {
+ Use *U = UseFunc.first;
+ Function *F = UseFunc.second;
+ PointerType *PTy = cast<PointerType>(U->get()->getType());
+ FunctionType *Ty = dyn_cast<FunctionType>(PTy->getElementType());
+
+    // If the function is cast to something like i8* as a "generic pointer"
+    // to be cast to something else later, we can't generate a wrapper for it.
+ // Just ignore such casts for now.
+ if (!Ty)
+ continue;
+
+ // Bitcasted vararg functions occur in Emscripten's implementation of
+ // EM_ASM, so suppress wrappers for them for now.
+ if (TemporaryWorkarounds && (Ty->isVarArg() || F->isVarArg()))
+ continue;
+
+ auto Pair = Wrappers.insert(std::make_pair(std::make_pair(F, Ty), nullptr));
+ if (Pair.second)
+ Pair.first->second = CreateWrapper(F, Ty);
+
+ Function *Wrapper = Pair.first->second;
+ if (!Wrapper)
+ continue;
+
+ if (isa<Constant>(U->get()))
+ U->get()->replaceAllUsesWith(Wrapper);
+ else
+ U->set(Wrapper);
+ }
+
+ // If we created a wrapper for main, rename the wrapper so that it's the
+ // one that gets called from startup.
+ if (CallMain) {
+ Main->setName("__original_main");
+ Function *MainWrapper =
+ cast<Function>(CallMain->getCalledValue()->stripPointerCasts());
+ MainWrapper->setName("main");
+ MainWrapper->setLinkage(Main->getLinkage());
+ MainWrapper->setVisibility(Main->getVisibility());
+ Main->setLinkage(Function::PrivateLinkage);
+ Main->setVisibility(Function::DefaultVisibility);
+ delete CallMain;
+ }
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
new file mode 100644
index 000000000000..88daea7e3681
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFixIrreducibleControlFlow.cpp
@@ -0,0 +1,295 @@
+//=- WebAssemblyFixIrreducibleControlFlow.cpp - Fix irreducible control flow -//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a pass that transforms irreducible control flow
+/// into reducible control flow. Irreducible control flow means multiple-entry
+/// loops; they appear as CFG cycles that are not recorded in MachineLoopInfo
+/// due to being unnatural.
+///
+/// Note that LLVM has a generic pass that lowers irreducible control flow, but
+/// it linearizes control flow, turning diamonds into two triangles, which is
+/// both unnecessary and undesirable for WebAssembly.
+///
+/// TODO: The transformation implemented here handles all irreducible control
+/// flow, without exponential code-size expansion, though it does so by creating
+/// inefficient code in many cases. Ideally, we should add other
+/// transformations, including code-duplicating cases, which can be more
+/// efficient in common cases, and they can fall back to this conservative
+/// implementation as needed.
+///
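+/// As a small illustration (a hypothetical CFG, not from any test case):
+///
+///       entry
+///       /   \
+///      v     v
+///      A <-> B
+///
+/// The cycle A <-> B has two entries, so neither block dominates the other and
+/// no natural loop is formed; this pass routes control into the cycle through
+/// a single dispatch block containing a br_table instead.
+///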
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/ADT/PriorityQueue.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-fix-irreducible-control-flow"
+
+namespace {
+class WebAssemblyFixIrreducibleControlFlow final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Fix Irreducible Control Flow";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ bool VisitLoop(MachineFunction &MF, MachineLoopInfo &MLI, MachineLoop *Loop);
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyFixIrreducibleControlFlow() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyFixIrreducibleControlFlow::ID = 0;
+FunctionPass *llvm::createWebAssemblyFixIrreducibleControlFlow() {
+ return new WebAssemblyFixIrreducibleControlFlow();
+}
+
+namespace {
+
+/// A utility for walking the blocks of a loop, handling a nested inner
+/// loop as a monolithic conceptual block.
+class MetaBlock {
+ MachineBasicBlock *Block;
+ SmallVector<MachineBasicBlock *, 2> Preds;
+ SmallVector<MachineBasicBlock *, 2> Succs;
+
+public:
+ explicit MetaBlock(MachineBasicBlock *MBB)
+ : Block(MBB), Preds(MBB->pred_begin(), MBB->pred_end()),
+ Succs(MBB->succ_begin(), MBB->succ_end()) {}
+
+ explicit MetaBlock(MachineLoop *Loop) : Block(Loop->getHeader()) {
+ Loop->getExitBlocks(Succs);
+ for (MachineBasicBlock *Pred : Block->predecessors())
+ if (!Loop->contains(Pred))
+ Preds.push_back(Pred);
+ }
+
+ MachineBasicBlock *getBlock() const { return Block; }
+
+ const SmallVectorImpl<MachineBasicBlock *> &predecessors() const {
+ return Preds;
+ }
+ const SmallVectorImpl<MachineBasicBlock *> &successors() const {
+ return Succs;
+ }
+
+ bool operator==(const MetaBlock &MBB) { return Block == MBB.Block; }
+ bool operator!=(const MetaBlock &MBB) { return Block != MBB.Block; }
+};
+
+class SuccessorList final : public MetaBlock {
+ size_t Index;
+ size_t Num;
+
+public:
+ explicit SuccessorList(MachineBasicBlock *MBB)
+ : MetaBlock(MBB), Index(0), Num(successors().size()) {}
+
+ explicit SuccessorList(MachineLoop *Loop)
+ : MetaBlock(Loop), Index(0), Num(successors().size()) {}
+
+ bool HasNext() const { return Index != Num; }
+
+ MachineBasicBlock *Next() {
+ assert(HasNext());
+ return successors()[Index++];
+ }
+};
+
+} // end anonymous namespace
+
+bool WebAssemblyFixIrreducibleControlFlow::VisitLoop(MachineFunction &MF,
+ MachineLoopInfo &MLI,
+ MachineLoop *Loop) {
+ MachineBasicBlock *Header = Loop ? Loop->getHeader() : &*MF.begin();
+ SetVector<MachineBasicBlock *> RewriteSuccs;
+
+  // DFS through Loop's body, looking for irreducible control flow. Loop is
+  // natural, we stay in its body, and we treat any nested loops
+  // monolithically, so any cycles we encounter indicate irreducibility.
+ SmallPtrSet<MachineBasicBlock *, 8> OnStack;
+ SmallPtrSet<MachineBasicBlock *, 8> Visited;
+ SmallVector<SuccessorList, 4> LoopWorklist;
+ LoopWorklist.push_back(SuccessorList(Header));
+ OnStack.insert(Header);
+ Visited.insert(Header);
+ while (!LoopWorklist.empty()) {
+ SuccessorList &Top = LoopWorklist.back();
+ if (Top.HasNext()) {
+ MachineBasicBlock *Next = Top.Next();
+ if (Next == Header || (Loop && !Loop->contains(Next)))
+ continue;
+ if (LLVM_LIKELY(OnStack.insert(Next).second)) {
+ if (!Visited.insert(Next).second) {
+ OnStack.erase(Next);
+ continue;
+ }
+ MachineLoop *InnerLoop = MLI.getLoopFor(Next);
+ if (InnerLoop != Loop)
+ LoopWorklist.push_back(SuccessorList(InnerLoop));
+ else
+ LoopWorklist.push_back(SuccessorList(Next));
+ } else {
+ RewriteSuccs.insert(Top.getBlock());
+ }
+ continue;
+ }
+ OnStack.erase(Top.getBlock());
+ LoopWorklist.pop_back();
+ }
+
+ // Most likely, we didn't find any irreducible control flow.
+ if (LLVM_LIKELY(RewriteSuccs.empty()))
+ return false;
+
+ DEBUG(dbgs() << "Irreducible control flow detected!\n");
+
+  // Ok. We have irreducible control flow! Create a dispatch block which will
+  // contain a jump table to any block in the problematic set of blocks.
+ MachineBasicBlock *Dispatch = MF.CreateMachineBasicBlock();
+ MF.insert(MF.end(), Dispatch);
+ MLI.changeLoopFor(Dispatch, Loop);
+
+ // Add the jump table.
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ MachineInstrBuilder MIB = BuildMI(*Dispatch, Dispatch->end(), DebugLoc(),
+ TII.get(WebAssembly::BR_TABLE_I32));
+
+ // Add the register which will be used to tell the jump table which block to
+ // jump to.
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ unsigned Reg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ MIB.addReg(Reg);
+
+ // Collect all the blocks which need to have their successors rewritten,
+ // add the successors to the jump table, and remember their index.
+ DenseMap<MachineBasicBlock *, unsigned> Indices;
+ SmallVector<MachineBasicBlock *, 4> SuccWorklist(RewriteSuccs.begin(),
+ RewriteSuccs.end());
+ while (!SuccWorklist.empty()) {
+ MachineBasicBlock *MBB = SuccWorklist.pop_back_val();
+ auto Pair = Indices.insert(std::make_pair(MBB, 0));
+ if (!Pair.second)
+ continue;
+
+ unsigned Index = MIB.getInstr()->getNumExplicitOperands() - 1;
+ DEBUG(dbgs() << printMBBReference(*MBB) << " has index " << Index << "\n");
+
+ Pair.first->second = Index;
+ for (auto Pred : MBB->predecessors())
+ RewriteSuccs.insert(Pred);
+
+ MIB.addMBB(MBB);
+ Dispatch->addSuccessor(MBB);
+
+ MetaBlock Meta(MBB);
+ for (auto *Succ : Meta.successors())
+ if (Succ != Header && (!Loop || Loop->contains(Succ)))
+ SuccWorklist.push_back(Succ);
+ }
+
+ // Rewrite the problematic successors for every block in RewriteSuccs.
+ // For simplicity, we just introduce a new block for every edge we need to
+ // rewrite. Fancier things are possible.
+ for (MachineBasicBlock *MBB : RewriteSuccs) {
+ DenseMap<MachineBasicBlock *, MachineBasicBlock *> Map;
+ for (auto *Succ : MBB->successors()) {
+ if (!Indices.count(Succ))
+ continue;
+
+ MachineBasicBlock *Split = MF.CreateMachineBasicBlock();
+ MF.insert(MBB->isLayoutSuccessor(Succ) ? MachineFunction::iterator(Succ)
+ : MF.end(),
+ Split);
+ MLI.changeLoopFor(Split, Loop);
+
+      // Set the jump table's index register to the index of the block we wish
+      // to jump to, then branch to the jump table.
+ BuildMI(*Split, Split->end(), DebugLoc(), TII.get(WebAssembly::CONST_I32),
+ Reg)
+ .addImm(Indices[Succ]);
+ BuildMI(*Split, Split->end(), DebugLoc(), TII.get(WebAssembly::BR))
+ .addMBB(Dispatch);
+ Split->addSuccessor(Dispatch);
+ Map[Succ] = Split;
+ }
+ // Remap the terminator operands and the successor list.
+ for (MachineInstr &Term : MBB->terminators())
+ for (auto &Op : Term.explicit_uses())
+ if (Op.isMBB() && Indices.count(Op.getMBB()))
+ Op.setMBB(Map[Op.getMBB()]);
+ for (auto Rewrite : Map)
+ MBB->replaceSuccessor(Rewrite.first, Rewrite.second);
+ }
+
+ // Create a fake default label, because br_table requires one.
+ MIB.addMBB(MIB.getInstr()
+ ->getOperand(MIB.getInstr()->getNumExplicitOperands() - 1)
+ .getMBB());
+
+ return true;
+}
+
+bool WebAssemblyFixIrreducibleControlFlow::runOnMachineFunction(
+ MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Fixing Irreducible Control Flow **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ auto &MLI = getAnalysis<MachineLoopInfo>();
+
+ // Visit the function body, which is identified as a null loop.
+ Changed |= VisitLoop(MF, MLI, nullptr);
+
+ // Visit all the loops.
+ SmallVector<MachineLoop *, 8> Worklist(MLI.begin(), MLI.end());
+ while (!Worklist.empty()) {
+ MachineLoop *CurLoop = Worklist.pop_back_val();
+ Worklist.append(CurLoop->begin(), CurLoop->end());
+ Changed |= VisitLoop(MF, MLI, CurLoop);
+ }
+
+ // If we made any changes, completely recompute everything.
+ if (LLVM_UNLIKELY(Changed)) {
+ DEBUG(dbgs() << "Recomputing dominators and loops.\n");
+ MF.getRegInfo().invalidateLiveness();
+ MF.RenumberBlocks();
+ getAnalysis<MachineDominatorTree>().runOnMachineFunction(MF);
+ MLI.runOnMachineFunction(MF);
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
new file mode 100644
index 000000000000..84246052f601
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.cpp
@@ -0,0 +1,276 @@
+//===-- WebAssemblyFrameLowering.cpp - WebAssembly Frame Lowering ----------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the WebAssembly implementation of the
+/// TargetFrameLowering class.
+///
+/// On WebAssembly, there aren't a lot of things to do here. There are no
+/// callee-saved registers to save, and no spill slots.
+///
+/// The stack grows downward.
+///
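+/// As a rough sketch (illustrative pseudo-wasm, not the exact emitted code), a
+/// function with a 16-byte frame gets a prologue and epilogue along the lines
+/// of:
+///
+///   sp = get_global __stack_pointer      ;; a plain load on ELF targets
+///   sp = sp - 16                         ;; allocate the frame
+///   set_global __stack_pointer, sp       ;; only when a writeback is needed
+///   ...
+///   set_global __stack_pointer, sp + 16  ;; epilogue restore, when needed
+///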
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyFrameLowering.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyTargetMachine.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-frame-info"
+
+// TODO: wasm64
+// TODO: Emit TargetOpcode::CFI_INSTRUCTION instructions
+
+/// We need a base pointer in the case of having items on the stack that
+/// require stricter alignment than the stack pointer itself provides. Because
+/// we need to shift the stack pointer by some unknown amount to force the
+/// alignment, we need to record the value of the stack pointer on entry to
+/// the function.
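+/// (For example, a local requiring 32-byte alignment forces the prologue to
+/// round the stack pointer down with an "and" mask, so the original value
+/// must be kept in a base-pointer register for the epilogue to restore.)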
+bool WebAssemblyFrameLowering::hasBP(
+ const MachineFunction &MF) const {
+ const auto *RegInfo =
+ MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ return RegInfo->needsStackRealignment(MF);
+}
+
+/// Return true if the specified function should have a dedicated frame pointer
+/// register.
+bool WebAssemblyFrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ // When we have var-sized objects, we move the stack pointer by an unknown
+ // amount, and need to emit a frame pointer to restore the stack to where we
+ // were on function entry.
+ // If we already need a base pointer, we use that to fix up the stack pointer.
+ // If there are no fixed-size objects, we would have no use of a frame
+ // pointer, and thus should not emit one.
+ bool HasFixedSizedObjects = MFI.getStackSize() > 0;
+ bool NeedsFixedReference = !hasBP(MF) || HasFixedSizedObjects;
+
+ return MFI.isFrameAddressTaken() ||
+ (MFI.hasVarSizedObjects() && NeedsFixedReference) ||
+ MFI.hasStackMap() || MFI.hasPatchPoint();
+}
+
+/// Under normal circumstances, when a frame pointer is not required, we reserve
+/// argument space for call sites in the function immediately on entry to the
+/// current function. This eliminates the need for add/sub sp brackets around
+/// call sites. Returns true if the call frame is included as part of the stack
+/// frame.
+bool WebAssemblyFrameLowering::hasReservedCallFrame(
+ const MachineFunction &MF) const {
+ return !MF.getFrameInfo().hasVarSizedObjects();
+}
+
+
+/// Returns true if this function needs a local user-space stack pointer.
+/// Unlike a machine stack pointer, the wasm user stack pointer is a global
+/// variable, so it is loaded into a register in the prolog.
+bool WebAssemblyFrameLowering::needsSP(const MachineFunction &MF,
+ const MachineFrameInfo &MFI) const {
+ return MFI.getStackSize() || MFI.adjustsStack() || hasFP(MF);
+}
+
+/// Returns true if the local user-space stack pointer needs to be written back
+/// to memory by this function (this is not meaningful if needsSP is false). If
+/// false, the stack red zone can be used and only a local SP is needed.
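+/// (For example, a leaf function with a frame no larger than RedZoneSize can
+/// address its locals below the incoming stack pointer and never store the
+/// stack pointer back to memory.)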
+bool WebAssemblyFrameLowering::needsSPWriteback(
+ const MachineFunction &MF, const MachineFrameInfo &MFI) const {
+ assert(needsSP(MF, MFI));
+ return MFI.getStackSize() > RedZoneSize || MFI.hasCalls() ||
+ MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
+}
+
+static void writeSPToMemory(unsigned SrcReg, MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &InsertAddr,
+ MachineBasicBlock::iterator &InsertStore,
+ const DebugLoc &DL) {
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ const char *ES = "__stack_pointer";
+ auto *SPSymbol = MF.createExternalSymbolName(ES);
+ if (MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple().isOSBinFormatELF()) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned Zero = MRI.createVirtualRegister(PtrRC);
+
+ BuildMI(MBB, InsertAddr, DL, TII->get(WebAssembly::CONST_I32), Zero)
+ .addImm(0);
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo(MF.getPSVManager().getExternalSymbolCallEntry(ES)),
+ MachineMemOperand::MOStore, 4, 4);
+ BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::STORE_I32))
+ .addImm(2) // p2align
+ .addExternalSymbol(SPSymbol)
+ .addReg(Zero)
+ .addReg(SrcReg)
+ .addMemOperand(MMO);
+ } else {
+ BuildMI(MBB, InsertStore, DL, TII->get(WebAssembly::SET_GLOBAL_I32))
+ .addExternalSymbol(SPSymbol)
+ .addReg(SrcReg);
+ }
+}
+
+MachineBasicBlock::iterator
+WebAssemblyFrameLowering::eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ assert(!I->getOperand(0).getImm() && (hasFP(MF) || hasBP(MF)) &&
+ "Call frame pseudos should only be used for dynamic stack adjustment");
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ if (I->getOpcode() == TII->getCallFrameDestroyOpcode() &&
+ needsSPWriteback(MF, MF.getFrameInfo())) {
+ DebugLoc DL = I->getDebugLoc();
+ writeSPToMemory(WebAssembly::SP32, MF, MBB, I, I, DL);
+ }
+ return MBB.erase(I);
+}
+
+void WebAssemblyFrameLowering::emitPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ // TODO: Do ".setMIFlag(MachineInstr::FrameSetup)" on emitted instructions
+ auto &MFI = MF.getFrameInfo();
+ assert(MFI.getCalleeSavedInfo().empty() &&
+ "WebAssembly should not have callee-saved registers");
+
+ if (!needsSP(MF, MFI)) return;
+ uint64_t StackSize = MFI.getStackSize();
+
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ auto &MRI = MF.getRegInfo();
+
+ auto InsertPt = MBB.begin();
+ while (InsertPt != MBB.end() && WebAssembly::isArgument(*InsertPt))
+ ++InsertPt;
+ DebugLoc DL;
+
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned SPReg = WebAssembly::SP32;
+ if (StackSize)
+ SPReg = MRI.createVirtualRegister(PtrRC);
+
+ const char *ES = "__stack_pointer";
+ auto *SPSymbol = MF.createExternalSymbolName(ES);
+ if (MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple().isOSBinFormatELF()) {
+ unsigned Zero = MRI.createVirtualRegister(PtrRC);
+
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), Zero)
+ .addImm(0);
+ MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
+ MachinePointerInfo(MF.getPSVManager().getExternalSymbolCallEntry(ES)),
+ MachineMemOperand::MOLoad, 4, 4);
+ // Load the SP value.
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::LOAD_I32), SPReg)
+ .addImm(2) // p2align
+ .addExternalSymbol(SPSymbol)
+ .addReg(Zero) // addr
+ .addMemOperand(LoadMMO);
+ } else {
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::GET_GLOBAL_I32), SPReg)
+ .addExternalSymbol(SPSymbol);
+ }
+
+ bool HasBP = hasBP(MF);
+ if (HasBP) {
+ auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
+ unsigned BasePtr = MRI.createVirtualRegister(PtrRC);
+ FI->setBasePointerVreg(BasePtr);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY), BasePtr)
+ .addReg(SPReg);
+ }
+ if (StackSize) {
+ // Subtract the frame size
+ unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
+ .addImm(StackSize);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::SUB_I32),
+ WebAssembly::SP32)
+ .addReg(SPReg)
+ .addReg(OffsetReg);
+ }
+ if (HasBP) {
+ unsigned BitmaskReg = MRI.createVirtualRegister(PtrRC);
+ unsigned Alignment = MFI.getMaxAlignment();
+ assert((1u << countTrailingZeros(Alignment)) == Alignment &&
+ "Alignment must be a power of 2");
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), BitmaskReg)
+ .addImm((int)~(Alignment - 1));
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::AND_I32),
+ WebAssembly::SP32)
+ .addReg(WebAssembly::SP32)
+ .addReg(BitmaskReg);
+ }
+ if (hasFP(MF)) {
+ // Unlike most conventional targets (where FP points to the saved FP),
+ // FP points to the bottom of the fixed-size locals, so we can use positive
+ // offsets in load/store instructions.
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::COPY),
+ WebAssembly::FP32)
+ .addReg(WebAssembly::SP32);
+ }
+ if (StackSize && needsSPWriteback(MF, MFI)) {
+ writeSPToMemory(WebAssembly::SP32, MF, MBB, InsertPt, InsertPt, DL);
+ }
+}
+
+void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ auto &MFI = MF.getFrameInfo();
+ uint64_t StackSize = MFI.getStackSize();
+ if (!needsSP(MF, MFI) || !needsSPWriteback(MF, MFI)) return;
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ auto &MRI = MF.getRegInfo();
+ auto InsertPt = MBB.getFirstTerminator();
+ DebugLoc DL;
+
+ if (InsertPt != MBB.end())
+ DL = InsertPt->getDebugLoc();
+
+ // Restore the stack pointer. If we had fixed-size locals, add the offset
+ // subtracted in the prolog.
+ unsigned SPReg = 0;
+ MachineBasicBlock::iterator InsertAddr = InsertPt;
+ if (hasBP(MF)) {
+ auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
+ SPReg = FI->getBasePointerVreg();
+ } else if (StackSize) {
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
+ InsertAddr =
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
+ .addImm(StackSize);
+ // In the epilog we don't need to write the result back to the SP32 physreg
+ // because it won't be used again. We can use a stackified register instead.
+ SPReg = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), SPReg)
+ .addReg(hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32)
+ .addReg(OffsetReg);
+ } else {
+ SPReg = hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32;
+ }
+
+ writeSPToMemory(SPReg, MF, MBB, InsertAddr, InsertPt, DL);
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
new file mode 100644
index 000000000000..4cc7f5ae058a
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFrameLowering.h
@@ -0,0 +1,57 @@
+// WebAssemblyFrameLowering.h - TargetFrameLowering for WebAssembly -*- C++ -*-/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This class implements the WebAssembly-specific parts of the
+/// TargetFrameLowering class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYFRAMELOWERING_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYFRAMELOWERING_H
+
+#include "llvm/CodeGen/TargetFrameLowering.h"
+
+namespace llvm {
+class MachineFrameInfo;
+
+class WebAssemblyFrameLowering final : public TargetFrameLowering {
+ public:
+ /// Size of the red zone for the user stack (leaf functions can use this much
+ /// space below the stack pointer without writing it back to memory).
+ // TODO: (ABI) Revisit and decide how large it should be.
+ static const size_t RedZoneSize = 128;
+
+ WebAssemblyFrameLowering()
+ : TargetFrameLowering(StackGrowsDown, /*StackAlignment=*/16,
+ /*LocalAreaOffset=*/0,
+ /*TransientStackAlignment=*/16,
+ /*StackRealignable=*/true) {}
+
+ MachineBasicBlock::iterator eliminateCallFramePseudoInstr(
+ MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
+
+ /// These methods insert prolog and epilog code into the function.
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ bool hasFP(const MachineFunction &MF) const override;
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+ private:
+ bool hasBP(const MachineFunction &MF) const;
+ bool needsSP(const MachineFunction &MF, const MachineFrameInfo &MFI) const;
+ bool needsSPWriteback(const MachineFunction &MF,
+ const MachineFrameInfo &MFI) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def
new file mode 100644
index 000000000000..2f0f106ef5b7
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISD.def
@@ -0,0 +1,25 @@
+//- WebAssemblyISD.def - WebAssembly ISD ---------------------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file describes the various WebAssembly ISD node types.
+///
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+HANDLE_NODETYPE(CALL1)
+HANDLE_NODETYPE(CALL0)
+HANDLE_NODETYPE(RETURN)
+HANDLE_NODETYPE(ARGUMENT)
+HANDLE_NODETYPE(Wrapper)
+HANDLE_NODETYPE(BR_IF)
+HANDLE_NODETYPE(BR_TABLE)
+
+// add memory opcodes starting at ISD::FIRST_TARGET_MEMORY_OPCODE here...
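+
+// This file follows the usual .def X-macro pattern: a client defines
+// HANDLE_NODETYPE, includes this file, and undefines it again. A sketch of a
+// typical consumer (illustrative; the real one lives in
+// WebAssemblyISelLowering.h):
+//
+//   enum NodeType : unsigned {
+//     FIRST_NUMBER = ISD::BUILTIN_OP_END,
+//   #define HANDLE_NODETYPE(NODE) NODE,
+//   #include "WebAssemblyISD.def"
+//   #undef HANDLE_NODETYPE
+//   };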
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
new file mode 100644
index 000000000000..9f40d35689a5
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp
@@ -0,0 +1,118 @@
+//- WebAssemblyISelDAGToDAG.cpp - A dag to dag inst selector for WebAssembly -//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines an instruction selector for the WebAssembly target.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/IR/Function.h" // To access function attributes.
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-isel"
+
+//===--------------------------------------------------------------------===//
+/// WebAssembly-specific code to select WebAssembly machine instructions for
+/// SelectionDAG operations.
+///
+namespace {
+class WebAssemblyDAGToDAGISel final : public SelectionDAGISel {
+ /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
+ /// right decision when generating code for different targets.
+ const WebAssemblySubtarget *Subtarget;
+
+ bool ForCodeSize;
+
+public:
+ WebAssemblyDAGToDAGISel(WebAssemblyTargetMachine &tm,
+ CodeGenOpt::Level OptLevel)
+ : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr), ForCodeSize(false) {
+ }
+
+ StringRef getPassName() const override {
+ return "WebAssembly Instruction Selection";
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ ForCodeSize = MF.getFunction().hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction().hasFnAttribute(Attribute::MinSize);
+ Subtarget = &MF.getSubtarget<WebAssemblySubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
+
+ void Select(SDNode *Node) override;
+
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
+ std::vector<SDValue> &OutOps) override;
+
+// Include the pieces autogenerated from the target description.
+#include "WebAssemblyGenDAGISel.inc"
+
+private:
+ // add select functions here...
+};
+} // end anonymous namespace
+
+void WebAssemblyDAGToDAGISel::Select(SDNode *Node) {
+ // Dump information about the Node being selected.
+ DEBUG(errs() << "Selecting: ");
+ DEBUG(Node->dump(CurDAG));
+ DEBUG(errs() << "\n");
+
+ // If we have a custom node, we already have selected!
+ if (Node->isMachineOpcode()) {
+ DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
+ Node->setNodeId(-1);
+ return;
+ }
+
+  // Handle a few custom selection cases.
+ EVT VT = Node->getValueType(0);
+
+ switch (Node->getOpcode()) {
+ default:
+ break;
+ // If we need WebAssembly-specific selection, it would go here.
+ (void)VT;
+ }
+
+ // Select the default instruction.
+ SelectCode(Node);
+}
+
+bool WebAssemblyDAGToDAGISel::SelectInlineAsmMemoryOperand(
+ const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
+ switch (ConstraintID) {
+ case InlineAsm::Constraint_i:
+ case InlineAsm::Constraint_m:
+    // We only support simple memory operands that have a single address
+    // operand and need no special handling.
+ OutOps.push_back(Op);
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+/// This pass converts a legalized DAG into a WebAssembly-specific DAG, ready
+/// for instruction scheduling.
+FunctionPass *llvm::createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
+ CodeGenOpt::Level OptLevel) {
+ return new WebAssemblyDAGToDAGISel(TM, OptLevel);
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
new file mode 100644
index 000000000000..299009fa6674
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -0,0 +1,875 @@
+//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the WebAssemblyTargetLowering class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyISelLowering.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower"
+
+WebAssemblyTargetLowering::WebAssemblyTargetLowering(
+ const TargetMachine &TM, const WebAssemblySubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
+ auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
+
+ // Booleans always contain 0 or 1.
+ setBooleanContents(ZeroOrOneBooleanContent);
+ // WebAssembly does not produce floating-point exceptions on normal floating
+ // point operations.
+ setHasFloatingPointExceptions(false);
+ // We don't know the microarchitecture here, so just reduce register pressure.
+ setSchedulingPreference(Sched::RegPressure);
+ // Tell ISel that we have a stack pointer.
+ setStackPointerRegisterToSaveRestore(
+ Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
+ addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
+ addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
+ addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
+ if (Subtarget->hasSIMD128()) {
+ addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
+ addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
+ }
+ // Compute derived properties from the register classes.
+ computeRegisterProperties(Subtarget->getRegisterInfo());
+
+ setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
+ setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
+ setOperationAction(ISD::JumpTable, MVTPtr, Custom);
+ setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
+ setOperationAction(ISD::BRIND, MVT::Other, Custom);
+
+  // Take the default expansion for va_arg, va_copy, and va_end. There is no
+  // default action for va_start, so we handle it with custom lowering.
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+ for (auto T : {MVT::f32, MVT::f64}) {
+ // Don't expand the floating-point types to constant pools.
+ setOperationAction(ISD::ConstantFP, T, Legal);
+ // Expand floating-point comparisons.
+ for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
+ ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
+ setCondCodeAction(CC, T, Expand);
+ // Expand floating-point library function operators.
+ for (auto Op : {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM,
+ ISD::FMA})
+ setOperationAction(Op, T, Expand);
+    // Mark as Legal the supported floating-point library function operators
+    // that otherwise default to expand.
+ for (auto Op :
+ {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
+ setOperationAction(Op, T, Legal);
+ // Support minnan and maxnan, which otherwise default to expand.
+ setOperationAction(ISD::FMINNAN, T, Legal);
+ setOperationAction(ISD::FMAXNAN, T, Legal);
+ // WebAssembly currently has no builtin f16 support.
+ setOperationAction(ISD::FP16_TO_FP, T, Expand);
+ setOperationAction(ISD::FP_TO_FP16, T, Expand);
+ setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
+ setTruncStoreAction(T, MVT::f16, Expand);
+ }
+
+ for (auto T : {MVT::i32, MVT::i64}) {
+ // Expand unavailable integer operations.
+ for (auto Op :
+ {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI,
+ ISD::MULHS, ISD::MULHU, ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS,
+ ISD::SRA_PARTS, ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC,
+ ISD::SUBE}) {
+ setOperationAction(Op, T, Expand);
+ }
+ }
+
+ // As a special case, these operators use the type to mean the type to
+ // sign-extend from.
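+  // (For example, SIGN_EXTEND_INREG with type i8 sign-extends from the low 8
+  // bits of an i32 or i64 operand.)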
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+ if (!Subtarget->hasAtomics()) {
+    // The Atomics feature includes signext instructions.
+ for (auto T : {MVT::i8, MVT::i16, MVT::i32})
+ setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
+ }
+
+ // Dynamic stack allocation: use the default expansion.
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
+
+ setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
+ setOperationAction(ISD::CopyToReg, MVT::Other, Custom);
+
+ // Expand these forms; we pattern-match the forms that we can handle in isel.
+ for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
+ for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
+ setOperationAction(Op, T, Expand);
+
+ // We have custom switch handling.
+ setOperationAction(ISD::BR_JT, MVT::Other, Custom);
+
+ // WebAssembly doesn't have:
+ // - Floating-point extending loads.
+ // - Floating-point truncating stores.
+ // - i1 extending loads.
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ for (auto T : MVT::integer_valuetypes())
+ for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
+ setLoadExtAction(Ext, T, MVT::i1, Promote);
+
+  // Trap lowers to wasm unreachable.
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
+ setMaxAtomicSizeInBitsSupported(64);
+}
+
+FastISel *WebAssemblyTargetLowering::createFastISel(
+ FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
+ return WebAssembly::createFastISel(FuncInfo, LibInfo);
+}
+
+bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
+ const GlobalAddressSDNode * /*GA*/) const {
+ // All offsets can be folded.
+ return true;
+}
+
+MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
+ EVT VT) const {
+ unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
+ if (BitWidth > 1 && BitWidth < 8) BitWidth = 8;
+
+ if (BitWidth > 64) {
+ // The shift will be lowered to a libcall, and compiler-rt libcalls expect
+ // the count to be an i32.
+ BitWidth = 32;
+ assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
+ "32-bit shift counts ought to be enough for anyone");
+ }
+
+ MVT Result = MVT::getIntegerVT(BitWidth);
+ assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
+ "Unable to represent scalar shift amount type");
+ return Result;
+}
+
+// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
+// undefined result on invalid/overflow, to the WebAssembly opcode, which
+// traps on invalid/overflow.
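+//
+// The emitted code forms a CFG diamond, roughly (illustrative pseudocode):
+//
+//   inRange = x is within the representable range  // one or two compares
+//   if (!inRange)
+//     result = Substitute    // 0 for unsigned, INT_MIN for signed
+//   else
+//     result = trunc(x)      // the trapping LoweredOpcode conversion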
+static MachineBasicBlock *
+LowerFPToInt(
+ MachineInstr &MI,
+ DebugLoc DL,
+ MachineBasicBlock *BB,
+ const TargetInstrInfo &TII,
+ bool IsUnsigned,
+ bool Int64,
+ bool Float64,
+ unsigned LoweredOpcode
+) {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+
+ unsigned OutReg = MI.getOperand(0).getReg();
+ unsigned InReg = MI.getOperand(1).getReg();
+
+ unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
+ unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
+ unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
+ unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
+ unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
+ unsigned Eqz = WebAssembly::EQZ_I32;
+ unsigned And = WebAssembly::AND_I32;
+ int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
+ int64_t Substitute = IsUnsigned ? 0 : Limit;
+ double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
+ auto &Context = BB->getParent()->getFunction().getContext();
+ Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);
+
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVM_BB);
+
+ MachineFunction::iterator It = ++BB->getIterator();
+ F->insert(It, FalseMBB);
+ F->insert(It, TrueMBB);
+ F->insert(It, DoneMBB);
+
+ // Transfer the remainder of BB and its successor edges to DoneMBB.
+ DoneMBB->splice(DoneMBB->begin(), BB,
+ std::next(MachineBasicBlock::iterator(MI)),
+ BB->end());
+ DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
+
+ BB->addSuccessor(TrueMBB);
+ BB->addSuccessor(FalseMBB);
+ TrueMBB->addSuccessor(DoneMBB);
+ FalseMBB->addSuccessor(DoneMBB);
+
+ unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
+ Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
+ Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
+ CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
+ TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
+
+ MI.eraseFromParent();
+ // For signed numbers, we can do a single comparison to determine whether
+ // fabs(x) is within range.
+ if (IsUnsigned) {
+ Tmp0 = InReg;
+ } else {
+ BuildMI(BB, DL, TII.get(Abs), Tmp0)
+ .addReg(InReg);
+ }
+ BuildMI(BB, DL, TII.get(FConst), Tmp1)
+ .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
+ BuildMI(BB, DL, TII.get(LT), CmpReg)
+ .addReg(Tmp0)
+ .addReg(Tmp1);
+
+ // For unsigned numbers, we have to do a separate comparison with zero.
+ if (IsUnsigned) {
+ Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
+ unsigned SecondCmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ unsigned AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ BuildMI(BB, DL, TII.get(FConst), Tmp1)
+ .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
+ BuildMI(BB, DL, TII.get(GE), SecondCmpReg)
+ .addReg(Tmp0)
+ .addReg(Tmp1);
+ BuildMI(BB, DL, TII.get(And), AndReg)
+ .addReg(CmpReg)
+ .addReg(SecondCmpReg);
+ CmpReg = AndReg;
+ }
+
+ BuildMI(BB, DL, TII.get(Eqz), EqzReg)
+ .addReg(CmpReg);
+
+ // Create the CFG diamond to select between doing the conversion or using
+ // the substitute value.
+ BuildMI(BB, DL, TII.get(WebAssembly::BR_IF))
+ .addMBB(TrueMBB)
+ .addReg(EqzReg);
+ BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg)
+ .addReg(InReg);
+ BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR))
+ .addMBB(DoneMBB);
+ BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg)
+ .addImm(Substitute);
+ BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
+ .addReg(FalseReg)
+ .addMBB(FalseMBB)
+ .addReg(TrueReg)
+ .addMBB(TrueMBB);
+
+ return DoneMBB;
+}
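
The CFG diamond constructed above encodes a simple scalar check. A minimal C++ sketch of the signed f32-to-i32 case follows; the function name is hypothetical, and the real code emits wasm compares and a br_if rather than calling anything.

    #include <cmath>
    #include <cstdint>

    // Sketch of the semantics LowerFPToInt builds for signed f32 -> i32.
    // 2147483648.0 is CmpVal (= -(double)INT32_MIN); INT32_MIN is Substitute.
    int32_t FPToSIntSketch(float X) {
      if (!(std::fabs(X) < 2147483648.0)) // NaN or out of range: the Eqz path
        return INT32_MIN;                 // TrueMBB: use the substitute value
      return static_cast<int32_t>(X);     // FalseMBB: trapping trunc is safe
    }

Note that X == (float)INT32_MIN lands on the substitute path, and the substitute happens to be the correct conversion result, which is why Limit is chosen as the signed substitute. As the code above shows, the unsigned variants add a second compare against zero and AND the two results.
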
+
+MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
+ MachineInstr &MI, MachineBasicBlock *BB) const {
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = MI.getDebugLoc();
+
+ switch (MI.getOpcode()) {
+ default: llvm_unreachable("Unexpected instr type to insert");
+ case WebAssembly::FP_TO_SINT_I32_F32:
+ return LowerFPToInt(MI, DL, BB, TII, false, false, false,
+ WebAssembly::I32_TRUNC_S_F32);
+ case WebAssembly::FP_TO_UINT_I32_F32:
+ return LowerFPToInt(MI, DL, BB, TII, true, false, false,
+ WebAssembly::I32_TRUNC_U_F32);
+ case WebAssembly::FP_TO_SINT_I64_F32:
+ return LowerFPToInt(MI, DL, BB, TII, false, true, false,
+ WebAssembly::I64_TRUNC_S_F32);
+ case WebAssembly::FP_TO_UINT_I64_F32:
+ return LowerFPToInt(MI, DL, BB, TII, true, true, false,
+ WebAssembly::I64_TRUNC_U_F32);
+ case WebAssembly::FP_TO_SINT_I32_F64:
+ return LowerFPToInt(MI, DL, BB, TII, false, false, true,
+ WebAssembly::I32_TRUNC_S_F64);
+ case WebAssembly::FP_TO_UINT_I32_F64:
+ return LowerFPToInt(MI, DL, BB, TII, true, false, true,
+ WebAssembly::I32_TRUNC_U_F64);
+ case WebAssembly::FP_TO_SINT_I64_F64:
+ return LowerFPToInt(MI, DL, BB, TII, false, true, true,
+ WebAssembly::I64_TRUNC_S_F64);
+ case WebAssembly::FP_TO_UINT_I64_F64:
+ return LowerFPToInt(MI, DL, BB, TII, true, true, true,
+ WebAssembly::I64_TRUNC_U_F64);
+ llvm_unreachable("Unexpected instruction to emit with custom inserter");
+ }
+}
+
+const char *WebAssemblyTargetLowering::getTargetNodeName(
+ unsigned Opcode) const {
+ switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
+ case WebAssemblyISD::FIRST_NUMBER:
+ break;
+#define HANDLE_NODETYPE(NODE) \
+ case WebAssemblyISD::NODE: \
+ return "WebAssemblyISD::" #NODE;
+#include "WebAssemblyISD.def"
+#undef HANDLE_NODETYPE
+ }
+ return nullptr;
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
+ // First, see if this is a constraint that directly corresponds to a
+ // WebAssembly register class.
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r':
+ assert(VT != MVT::iPTR && "Pointer MVT not expected here");
+ if (Subtarget->hasSIMD128() && VT.isVector()) {
+ if (VT.getSizeInBits() == 128)
+ return std::make_pair(0U, &WebAssembly::V128RegClass);
+ }
+ if (VT.isInteger() && !VT.isVector()) {
+ if (VT.getSizeInBits() <= 32)
+ return std::make_pair(0U, &WebAssembly::I32RegClass);
+ if (VT.getSizeInBits() <= 64)
+ return std::make_pair(0U, &WebAssembly::I64RegClass);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
+ // Assume ctz is a relatively cheap operation.
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
+ // Assume clz is a relatively cheap operation.
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM,
+ Type *Ty,
+ unsigned AS,
+ Instruction *I) const {
+ // WebAssembly offsets are added as unsigned without wrapping.
+ // isLegalAddressingMode gives us no way to determine whether wrapping could
+ // occur, so we approximate this by accepting only non-negative offsets.
+ if (AM.BaseOffs < 0) return false;
+
+ // WebAssembly has no scale register operands.
+ if (AM.Scale != 0) return false;
+
+ // Everything else is legal.
+ return true;
+}
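
Concretely, the two rules above accept or reject candidate modes as follows (an illustrative table, not code from this file):

    // base + 16     -> legal   (BaseOffs = 16, Scale = 0)
    // base - 8      -> rejected: offsets are added unsigned, so a negative
    //                  BaseOffs could wrap
    // base + 4 * i  -> rejected: Scale != 0; wasm loads and stores have no
    //                  scaled-index form
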
+
+bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
+ EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
+ // WebAssembly supports unaligned accesses, though it should be declared
+ // with the p2align attribute on loads and stores which do so, and there
+ // may be a performance impact. We tell LLVM they're "fast" because
+ // for the kinds of things that LLVM uses this for (merging adjacent stores
+ // of constants, etc.), WebAssembly implementations will either want the
+ // unaligned access or they'll split anyway.
+ if (Fast) *Fast = true;
+ return true;
+}
+
+bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
+ AttributeList Attr) const {
+ // The current thinking is that wasm engines will themselves perform the
+ // strength reduction of division by constants, so we can save on code size.
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Lowering private implementation.
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Lowering Code
+//===----------------------------------------------------------------------===//
+
+static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *msg) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ DAG.getContext()->diagnose(
+ DiagnosticInfoUnsupported(MF.getFunction(), msg, DL.getDebugLoc()));
+}
+
+// Test whether the given calling convention is supported.
+static bool CallingConvSupported(CallingConv::ID CallConv) {
+ // We currently support the language-independent, target-independent
+ // conventions. We don't yet have a way to annotate calls with properties like
+ // "cold", and we don't have any call-clobbered registers, so these are mostly
+ // all handled the same.
+ return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
+ CallConv == CallingConv::Cold ||
+ CallConv == CallingConv::PreserveMost ||
+ CallConv == CallingConv::PreserveAll ||
+ CallConv == CallingConv::CXX_FAST_TLS;
+}
+
+SDValue WebAssemblyTargetLowering::LowerCall(
+ CallLoweringInfo &CLI, SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc DL = CLI.DL;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ MachineFunction &MF = DAG.getMachineFunction();
+ auto Layout = MF.getDataLayout();
+
+ CallingConv::ID CallConv = CLI.CallConv;
+ if (!CallingConvSupported(CallConv))
+ fail(DL, DAG,
+ "WebAssembly doesn't support language-specific or target-specific "
+ "calling conventions yet");
+ if (CLI.IsPatchPoint)
+ fail(DL, DAG, "WebAssembly doesn't support patch point yet");
+
+ // WebAssembly doesn't currently support explicit tail calls. If they are
+ // required, fail. Otherwise, just disable them.
+ if ((CallConv == CallingConv::Fast && CLI.IsTailCall &&
+ MF.getTarget().Options.GuaranteedTailCallOpt) ||
+ (CLI.CS && CLI.CS.isMustTailCall()))
+ fail(DL, DAG, "WebAssembly doesn't support tail call yet");
+ CLI.IsTailCall = false;
+
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ if (Ins.size() > 1)
+ fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");
+
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ for (unsigned i = 0; i < Outs.size(); ++i) {
+ const ISD::OutputArg &Out = Outs[i];
+ SDValue &OutVal = OutVals[i];
+ if (Out.Flags.isNest())
+ fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
+ if (Out.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
+ if (Out.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
+ if (Out.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
+ if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
+ auto &MFI = MF.getFrameInfo();
+ int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
+ Out.Flags.getByValAlign(),
+ /*isSS=*/false);
+ SDValue SizeNode =
+ DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
+ SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
+ Chain = DAG.getMemcpy(
+ Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
+ /*isVolatile*/ false, /*AlwaysInline=*/false,
+ /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
+ OutVal = FINode;
+ }
+ }
+
+ bool IsVarArg = CLI.IsVarArg;
+ unsigned NumFixedArgs = CLI.NumFixedArgs;
+
+ auto PtrVT = getPointerTy(Layout);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+ if (IsVarArg) {
+ // Outgoing non-fixed arguments are placed in a buffer. First
+ // compute their offsets and the total amount of buffer space needed.
+ for (SDValue Arg :
+ make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
+ EVT VT = Arg.getValueType();
+ assert(VT != MVT::iPTR && "Legalized args should be concrete");
+ Type *Ty = VT.getTypeForEVT(*DAG.getContext());
+ unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
+ Layout.getABITypeAlignment(Ty));
+ CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
+ Offset, VT.getSimpleVT(),
+ CCValAssign::Full));
+ }
+ }
+
+ unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
+
+ SDValue FINode;
+ if (IsVarArg && NumBytes) {
+ // Next, emit stores of the non-fixed argument values into the stack
+ // buffer at the offsets computed above.
+ int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
+ Layout.getStackAlignment(),
+ /*isSS=*/false);
+ unsigned ValNo = 0;
+ SmallVector<SDValue, 8> Chains;
+ for (SDValue Arg :
+ make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
+ assert(ArgLocs[ValNo].getValNo() == ValNo &&
+ "ArgLocs should remain in order and only hold varargs args");
+ unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
+ FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
+ SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
+ DAG.getConstant(Offset, DL, PtrVT));
+ Chains.push_back(DAG.getStore(
+ Chain, DL, Arg, Add,
+ MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
+ }
+ if (!Chains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
+ } else if (IsVarArg) {
+ FINode = DAG.getIntPtrConstant(0, DL);
+ }
+
+ // Compute the operands for the CALLn node.
+ SmallVector<SDValue, 16> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+
+ // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
+ // isn't reliable.
+ Ops.append(OutVals.begin(),
+ IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
+ // Add a pointer to the vararg buffer.
+ if (IsVarArg) Ops.push_back(FINode);
+
+ SmallVector<EVT, 8> InTys;
+ for (const auto &In : Ins) {
+ assert(!In.Flags.isByVal() && "byval is not valid for return values");
+ assert(!In.Flags.isNest() && "nest is not valid for return values");
+ if (In.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
+ if (In.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
+ if (In.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG,
+ "WebAssembly hasn't implemented cons regs last return values");
+ // Ignore In.getOrigAlign() because all our arguments are passed in
+ // registers.
+ InTys.push_back(In.VT);
+ }
+ InTys.push_back(MVT::Other);
+ SDVTList InTyList = DAG.getVTList(InTys);
+ SDValue Res =
+ DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
+ DL, InTyList, Ops);
+ if (Ins.empty()) {
+ Chain = Res;
+ } else {
+ InVals.push_back(Res);
+ Chain = Res.getValue(1);
+ }
+
+ return Chain;
+}
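
Put together, the varargs path above rewrites a call such as Callee(1, 2.0, 3), where only the first argument is fixed, into roughly the following shape. All names here are hypothetical; this is a sketch of the lowered form, not code from this file.

    #include <cstdint>

    void Callee(int32_t Fixed, void *VarargPtr) {} // lowered signature (sketch)

    struct VarargBuf {  // offsets follow CCInfo.AllocateStack's ABI alignments
      double A;         // offset 0
      int32_t B;        // offset 8
    };

    void CallSiteSketch() {
      VarargBuf Buf;    // the CreateStackObject(NumBytes, StackAlign) buffer
      Buf.A = 2.0;      // one DAG.getStore per non-fixed argument
      Buf.B = 3;
      Callee(1, &Buf);  // FINode is appended after the fixed operands
    }
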
+
+bool WebAssemblyTargetLowering::CanLowerReturn(
+ CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext & /*Context*/) const {
+ // WebAssembly can't currently handle returning tuples.
+ return Outs.size() <= 1;
+}
+
+SDValue WebAssemblyTargetLowering::LowerReturn(
+ SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+ SelectionDAG &DAG) const {
+ assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
+ if (!CallingConvSupported(CallConv))
+ fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
+
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+ RetOps.append(OutVals.begin(), OutVals.end());
+ Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);
+
+ // Record the number and types of the return values.
+ for (const ISD::OutputArg &Out : Outs) {
+ assert(!Out.Flags.isByVal() && "byval is not valid for return values");
+ assert(!Out.Flags.isNest() && "nest is not valid for return values");
+ assert(Out.IsFixed && "non-fixed return value is not valid");
+ if (Out.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
+ if (Out.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
+ if (Out.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
+ }
+
+ return Chain;
+}
+
+SDValue WebAssemblyTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ if (!CallingConvSupported(CallConv))
+ fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");
+
+ MachineFunction &MF = DAG.getMachineFunction();
+ auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();
+
+ // Set up the incoming ARGUMENTS value, which serves to model the liveness
+ // of the incoming values before they're represented by virtual registers.
+ MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);
+
+ for (const ISD::InputArg &In : Ins) {
+ if (In.Flags.isInAlloca())
+ fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
+ if (In.Flags.isNest())
+ fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
+ if (In.Flags.isInConsecutiveRegs())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
+ if (In.Flags.isInConsecutiveRegsLast())
+ fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
+ // Ignore In.getOrigAlign() because all our arguments are passed in
+ // registers.
+ InVals.push_back(
+ In.Used
+ ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
+ DAG.getTargetConstant(InVals.size(), DL, MVT::i32))
+ : DAG.getUNDEF(In.VT));
+
+ // Record the number and types of arguments.
+ MFI->addParam(In.VT);
+ }
+
+ // Varargs are copied into a buffer allocated by the caller, and a pointer to
+ // the buffer is passed as an argument.
+ if (IsVarArg) {
+ MVT PtrVT = getPointerTy(MF.getDataLayout());
+ unsigned VarargVreg =
+ MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
+ MFI->setVarargBufferVreg(VarargVreg);
+ Chain = DAG.getCopyToReg(
+ Chain, DL, VarargVreg,
+ DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
+ DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
+ MFI->addParam(PtrVT);
+ }
+
+ // Record the number and types of results.
+ SmallVector<MVT, 4> Params;
+ SmallVector<MVT, 4> Results;
+ ComputeSignatureVTs(MF.getFunction(), DAG.getTarget(), Params, Results);
+ for (MVT VT : Results)
+ MFI->addResult(VT);
+
+ return Chain;
+}
+
+//===----------------------------------------------------------------------===//
+// Custom lowering hooks.
+//===----------------------------------------------------------------------===//
+
+SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ switch (Op.getOpcode()) {
+ default:
+ llvm_unreachable("unimplemented operation lowering");
+ return SDValue();
+ case ISD::FrameIndex:
+ return LowerFrameIndex(Op, DAG);
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::ExternalSymbol:
+ return LowerExternalSymbol(Op, DAG);
+ case ISD::JumpTable:
+ return LowerJumpTable(Op, DAG);
+ case ISD::BR_JT:
+ return LowerBR_JT(Op, DAG);
+ case ISD::VASTART:
+ return LowerVASTART(Op, DAG);
+ case ISD::BlockAddress:
+ case ISD::BRIND:
+ fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
+ return SDValue();
+ case ISD::RETURNADDR: // Probably nothing meaningful can be returned here.
+ fail(DL, DAG, "WebAssembly hasn't implemented __builtin_return_address");
+ return SDValue();
+ case ISD::FRAMEADDR:
+ return LowerFRAMEADDR(Op, DAG);
+ case ISD::CopyToReg:
+ return LowerCopyToReg(Op, DAG);
+ }
+}
+
+SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Src = Op.getOperand(2);
+ if (isa<FrameIndexSDNode>(Src.getNode())) {
+ // CopyToReg nodes don't support FrameIndex operands. Other targets select
+ // the FI to some LEA-like instruction, but since we don't have that, we
+ // need to insert some kind of instruction that can take an FI operand and
+ // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
+ // copy_local between Op and its FI operand.
+ SDValue Chain = Op.getOperand(0);
+ SDLoc DL(Op);
+ unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
+ EVT VT = Src.getValueType();
+ SDValue Copy(
+ DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
+ : WebAssembly::COPY_I64,
+ DL, VT, Src),
+ 0);
+ return Op.getNode()->getNumValues() == 1
+ ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
+ : DAG.getCopyToReg(Chain, DL, Reg, Copy, Op.getNumOperands() == 4
+ ? Op.getOperand(3)
+ : SDValue());
+ }
+ return SDValue();
+}
+
+SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
+ SelectionDAG &DAG) const {
+ int FI = cast<FrameIndexSDNode>(Op)->getIndex();
+ return DAG.getTargetFrameIndex(FI, Op.getValueType());
+}
+
+SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
+ SelectionDAG &DAG) const {
+ // Non-zero depths are not supported by WebAssembly currently. Use the
+ // legalizer's default expansion, which is to return 0 (what this function is
+ // documented to do).
+ if (Op.getConstantOperandVal(0) > 0)
+ return SDValue();
+
+ DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
+ EVT VT = Op.getValueType();
+ unsigned FP =
+ Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
+ return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
+}
+
+SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const auto *GA = cast<GlobalAddressSDNode>(Op);
+ EVT VT = Op.getValueType();
+ assert(GA->getTargetFlags() == 0 &&
+ "Unexpected target flags on generic GlobalAddressSDNode");
+ if (GA->getAddressSpace() != 0)
+ fail(DL, DAG, "WebAssembly only expects the 0 address space");
+ return DAG.getNode(
+ WebAssemblyISD::Wrapper, DL, VT,
+ DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset()));
+}
+
+SDValue WebAssemblyTargetLowering::LowerExternalSymbol(
+ SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ const auto *ES = cast<ExternalSymbolSDNode>(Op);
+ EVT VT = Op.getValueType();
+ assert(ES->getTargetFlags() == 0 &&
+ "Unexpected target flags on generic ExternalSymbolSDNode");
+ // Set the TargetFlags to 0x1 which indicates that this is a "function"
+ // symbol rather than a data symbol. We do this unconditionally even though
+ // we don't know anything about the symbol other than its name, because all
+ // external symbols used in target-independent SelectionDAG code are for
+ // functions.
+ return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
+ DAG.getTargetExternalSymbol(ES->getSymbol(), VT,
+ /*TargetFlags=*/0x1));
+}
+
+SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
+ SelectionDAG &DAG) const {
+ // There's no need for a Wrapper node because we always incorporate a jump
+ // table operand into a BR_TABLE instruction, rather than ever
+ // materializing it in a register.
+ const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+ return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
+ JT->getTargetFlags());
+}
+
+SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Chain = Op.getOperand(0);
+ const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
+ SDValue Index = Op.getOperand(2);
+ assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
+
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Index);
+
+ MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
+ const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
+
+ // Add an operand for each case.
+ for (auto MBB : MBBs) Ops.push_back(DAG.getBasicBlock(MBB));
+
+ // TODO: For now, we just pick something arbitrary for the default case.
+ // We really want to sniff out the guard and put in the real default case (and
+ // delete the guard).
+ Ops.push_back(DAG.getBasicBlock(MBBs[0]));
+
+ return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
+}
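
The input that reaches this hook is a dense switch. A hypothetical example of the shape involved:

    // Sketch: each case block becomes one br_table operand. The placeholder
    // default (MBBs[0]) should never be taken, because SelectionDAG normally
    // emits a range-check guard before the jump-table dispatch, and the real
    // default is reached through that guard.
    int Classify(unsigned Idx) { // Idx is the Index operand
      switch (Idx) {
      case 0: return 10;
      case 1: return 20;
      case 2: return 30;
      default: return -1;
      }
    }
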
+
+SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());
+
+ auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+
+ SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
+ MFI->getVarargBufferVreg(), PtrVT);
+ return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
+ MachinePointerInfo(SV), 0);
+}
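
On the callee side, LowerFormalArguments copies the hidden buffer pointer into VarargBufferVreg and LowerVASTART (above) stores it into the va_list slot, so a varargs function behaves like this standard C++ sketch (illustrative only; va_arg expansion itself happens in target-independent code):

    #include <cstdarg>

    // Sketch: the hidden trailing argument is the caller's vararg buffer
    // pointer; va_start captures it, and va_arg walks the buffer.
    int SumSketch(int Count, ...) {
      va_list Ap;
      va_start(Ap, Count); // LowerVASTART: store the buffer pointer into Ap
      int Total = 0;
      for (int I = 0; I < Count; ++I)
        Total += va_arg(Ap, int); // read from the buffer the caller filled
      va_end(Ap);
      return Total;
    }
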
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Optimization Hooks
+//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
new file mode 100644
index 000000000000..7bb8e71ab974
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -0,0 +1,102 @@
+//- WebAssemblyISelLowering.h - WebAssembly DAG Lowering Interface -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the interfaces that WebAssembly uses to lower LLVM
+/// code into a selection DAG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYISELLOWERING_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYISELLOWERING_H
+
+#include "llvm/CodeGen/TargetLowering.h"
+
+namespace llvm {
+
+namespace WebAssemblyISD {
+
+enum NodeType : unsigned {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+#define HANDLE_NODETYPE(NODE) NODE,
+#include "WebAssemblyISD.def"
+#undef HANDLE_NODETYPE
+};
+
+} // end namespace WebAssemblyISD
+
+class WebAssemblySubtarget;
+class WebAssemblyTargetMachine;
+
+class WebAssemblyTargetLowering final : public TargetLowering {
+ public:
+ WebAssemblyTargetLowering(const TargetMachine &TM,
+ const WebAssemblySubtarget &STI);
+
+ private:
+ /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
+ /// right decision when generating code for different targets.
+ const WebAssemblySubtarget *Subtarget;
+
+ FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo) const override;
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+ MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
+ const char *getTargetNodeName(unsigned Opcode) const override;
+ std::pair<unsigned, const TargetRegisterClass *> getRegForInlineAsmConstraint(
+ const TargetRegisterInfo *TRI, StringRef Constraint,
+ MVT VT) const override;
+ bool isCheapToSpeculateCttz() const override;
+ bool isCheapToSpeculateCtlz() const override;
+ bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
+ unsigned AS,
+ Instruction *I = nullptr) const override;
+ bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+ bool *Fast) const override;
+ bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
+
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
+ SelectionDAG &DAG) const override;
+ SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &DL, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+
+ // Custom lowering hooks.
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+ SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const;
+};
+
+namespace WebAssembly {
+FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo);
+} // end namespace WebAssembly
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
new file mode 100644
index 000000000000..a49172df158f
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
@@ -0,0 +1,214 @@
+// WebAssemblyInstrAtomics.td-WebAssembly Atomic codegen support-*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly Atomic operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Atomic loads
+//===----------------------------------------------------------------------===//
+
+let Defs = [ARGUMENTS] in {
+def ATOMIC_LOAD_I32 : WebAssemblyLoad<I32, "i32.atomic.load", 0xfe10>;
+def ATOMIC_LOAD_I64 : WebAssemblyLoad<I64, "i64.atomic.load", 0xfe11>;
+} // Defs = [ARGUMENTS]
+
+// Select loads with no constant offset.
+let Predicates = [HasAtomics] in {
+def : LoadPatNoOffset<i32, atomic_load_32, ATOMIC_LOAD_I32>;
+def : LoadPatNoOffset<i64, atomic_load_64, ATOMIC_LOAD_I64>;
+
+// Select loads with a constant offset.
+
+// Pattern with address + immediate offset
+def : LoadPatImmOff<i32, atomic_load_32, regPlusImm, ATOMIC_LOAD_I32>;
+def : LoadPatImmOff<i64, atomic_load_64, regPlusImm, ATOMIC_LOAD_I64>;
+def : LoadPatImmOff<i32, atomic_load_32, or_is_add, ATOMIC_LOAD_I32>;
+def : LoadPatImmOff<i64, atomic_load_64, or_is_add, ATOMIC_LOAD_I64>;
+
+def : LoadPatGlobalAddr<i32, atomic_load_32, ATOMIC_LOAD_I32>;
+def : LoadPatGlobalAddr<i64, atomic_load_64, ATOMIC_LOAD_I64>;
+
+def : LoadPatExternalSym<i32, atomic_load_32, ATOMIC_LOAD_I32>;
+def : LoadPatExternalSym<i64, atomic_load_64, ATOMIC_LOAD_I64>;
+
+
+// Select loads with just a constant offset.
+def : LoadPatOffsetOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>;
+def : LoadPatOffsetOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>;
+
+def : LoadPatGlobalAddrOffOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>;
+def : LoadPatGlobalAddrOffOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>;
+
+def : LoadPatExternSymOffOnly<i32, atomic_load_32, ATOMIC_LOAD_I32>;
+def : LoadPatExternSymOffOnly<i64, atomic_load_64, ATOMIC_LOAD_I64>;
+
+} // Predicates = [HasAtomics]
+
+// Extending loads. Note that there are only zero-extending atomic loads, no
+// sign-extending loads.
+let Defs = [ARGUMENTS] in {
+def ATOMIC_LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load8_u", 0xfe12>;
+def ATOMIC_LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.atomic.load16_u", 0xfe13>;
+def ATOMIC_LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load8_u", 0xfe14>;
+def ATOMIC_LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load16_u", 0xfe15>;
+def ATOMIC_LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.atomic.load32_u", 0xfe16>;
+} // Defs = [ARGUMENTS]
+
+// Fragments for extending loads. These are different from regular loads because
+// the SDNodes are derived from AtomicSDNode rather than LoadSDNode and
+// therefore don't have the extension type field. So instead of matching that,
+// we match the patterns that the type legalizer expands them to.
+
+// We directly match zext patterns and select the zext atomic loads.
+// i32 (zext (i8 (atomic_load_8))) gets legalized to
+// i32 (and (i32 (atomic_load_8)), 255)
+// These can be selected to a single zero-extending atomic load instruction.
+def zext_aload_8 : PatFrag<(ops node:$addr),
+ (and (i32 (atomic_load_8 node:$addr)), 255)>;
+def zext_aload_16 : PatFrag<(ops node:$addr),
+ (and (i32 (atomic_load_16 node:$addr)), 65535)>;
+// Unlike regular loads, extension to i64 is handled differently from i32.
+// i64 (zext (i8 (atomic_load_8))) gets legalized to
+// i64 (and (i64 (anyext (i32 (atomic_load_8)))), 255)
+def zext_aload_8_64 :
+ PatFrag<(ops node:$addr),
+ (and (i64 (anyext (i32 (atomic_load_8 node:$addr)))), 255)>;
+def zext_aload_16_64 :
+ PatFrag<(ops node:$addr),
+ (and (i64 (anyext (i32 (atomic_load_16 node:$addr)))), 65535)>;
+def zext_aload_32_64 :
+ PatFrag<(ops node:$addr),
+ (zext (i32 (atomic_load node:$addr)))>;
+
+// We don't have single sext atomic load instructions. So for sext loads, we
+// match bare subword loads (for 32-bit results) and anyext loads (for 64-bit
+// results) and select a zext load; the next instruction will be sext_inreg
+// which is selected by itself.
+def anyext_aload_8_64 :
+ PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>;
+def anyext_aload_16_64 :
+ PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>;
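
The equivalence these fragments rely on can be stated in plain C++; a sketch, not part of the .td file:

    #include <cstdint>

    // A zero-extending byte load is a full-width load of the byte plus a
    // mask: exactly the (and (atomic_load_8 ...), 255) shape matched above.
    uint32_t ZextLoad8(const uint8_t *P) {
      uint32_t Wide = *P; // atomic_load_8 yields an i32-sized value
      return Wide & 255;  // the 'and' the type legalizer introduces
    }
    uint64_t ZextLoad8To64(const uint8_t *P) {
      uint64_t Wide = uint64_t{*P}; // i64 path: anyext of the i32 load first,
      return Wide & 255;            // then the same mask (zext_aload_8_64)
    }
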
+
+let Predicates = [HasAtomics] in {
+// Select zero-extending loads with no constant offset.
+def : LoadPatNoOffset<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatNoOffset<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatNoOffset<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatNoOffset<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+def : LoadPatNoOffset<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>;
+
+// Select sign-extending loads with no constant offset
+def : LoadPatNoOffset<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatNoOffset<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatNoOffset<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatNoOffset<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+// 32->64 sext load gets selected as i32.atomic.load, i64.extend_s/i64
+
+
+// Zero-extending loads with constant offset
+def : LoadPatImmOff<i32, zext_aload_8, regPlusImm, ATOMIC_LOAD8_U_I32>;
+def : LoadPatImmOff<i32, zext_aload_16, regPlusImm, ATOMIC_LOAD16_U_I32>;
+def : LoadPatImmOff<i32, zext_aload_8, or_is_add, ATOMIC_LOAD8_U_I32>;
+def : LoadPatImmOff<i32, zext_aload_16, or_is_add, ATOMIC_LOAD16_U_I32>;
+def : LoadPatImmOff<i64, zext_aload_8_64, regPlusImm, ATOMIC_LOAD8_U_I64>;
+def : LoadPatImmOff<i64, zext_aload_16_64, regPlusImm, ATOMIC_LOAD16_U_I64>;
+def : LoadPatImmOff<i64, zext_aload_32_64, regPlusImm, ATOMIC_LOAD32_U_I64>;
+def : LoadPatImmOff<i64, zext_aload_8_64, or_is_add, ATOMIC_LOAD8_U_I64>;
+def : LoadPatImmOff<i64, zext_aload_16_64, or_is_add, ATOMIC_LOAD16_U_I64>;
+def : LoadPatImmOff<i64, zext_aload_32_64, or_is_add, ATOMIC_LOAD32_U_I64>;
+
+// Sign-extending loads with constant offset
+def : LoadPatImmOff<i32, atomic_load_8, regPlusImm, ATOMIC_LOAD8_U_I32>;
+def : LoadPatImmOff<i32, atomic_load_16, regPlusImm, ATOMIC_LOAD16_U_I32>;
+def : LoadPatImmOff<i32, atomic_load_8, or_is_add, ATOMIC_LOAD8_U_I32>;
+def : LoadPatImmOff<i32, atomic_load_16, or_is_add, ATOMIC_LOAD16_U_I32>;
+def : LoadPatImmOff<i64, anyext_aload_8_64, regPlusImm, ATOMIC_LOAD8_U_I64>;
+def : LoadPatImmOff<i64, anyext_aload_16_64, regPlusImm, ATOMIC_LOAD16_U_I64>;
+def : LoadPatImmOff<i64, anyext_aload_8_64, or_is_add, ATOMIC_LOAD8_U_I64>;
+def : LoadPatImmOff<i64, anyext_aload_16_64, or_is_add, ATOMIC_LOAD16_U_I64>;
+// No 32->64 patterns, just use i32.atomic.load and i64.extend_s/i64
+
+def : LoadPatGlobalAddr<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatGlobalAddr<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatGlobalAddr<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatGlobalAddr<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+def : LoadPatGlobalAddr<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>;
+def : LoadPatGlobalAddr<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatGlobalAddr<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatGlobalAddr<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatGlobalAddr<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+
+def : LoadPatExternalSym<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatExternalSym<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatExternalSym<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatExternalSym<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+def : LoadPatExternalSym<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>;
+def : LoadPatExternalSym<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatExternalSym<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatExternalSym<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatExternalSym<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+
+
+// Extending loads with just a constant offset
+def : LoadPatOffsetOnly<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatOffsetOnly<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatOffsetOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatOffsetOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+def : LoadPatOffsetOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>;
+def : LoadPatOffsetOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatOffsetOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatOffsetOnly<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatOffsetOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+
+def : LoadPatGlobalAddrOffOnly<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+
+def : LoadPatExternSymOffOnly<i32, zext_aload_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatExternSymOffOnly<i32, zext_aload_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatExternSymOffOnly<i64, zext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatExternSymOffOnly<i64, zext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+def : LoadPatExternSymOffOnly<i64, zext_aload_32_64, ATOMIC_LOAD32_U_I64>;
+def : LoadPatExternSymOffOnly<i32, atomic_load_8, ATOMIC_LOAD8_U_I32>;
+def : LoadPatExternSymOffOnly<i32, atomic_load_16, ATOMIC_LOAD16_U_I32>;
+def : LoadPatExternSymOffOnly<i64, anyext_aload_8_64, ATOMIC_LOAD8_U_I64>;
+def : LoadPatExternSymOffOnly<i64, anyext_aload_16_64, ATOMIC_LOAD16_U_I64>;
+
+
+} // Predicates = [HasAtomics]
+
+//===----------------------------------------------------------------------===//
+// Atomic stores
+//===----------------------------------------------------------------------===//
+
+// TODO: add atomic stores here...
+
+//===----------------------------------------------------------------------===//
+// Low-level exclusive operations
+//===----------------------------------------------------------------------===//
+
+// TODO: add exclusive operations here...
+
+// Load-exclusives.
+
+// Store-exclusives.
+
+// Store-release-exclusives.
+
+// And clear exclusive.
+
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td
new file mode 100644
index 000000000000..6b45839c14b0
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrCall.td
@@ -0,0 +1,136 @@
+//===- WebAssemblyInstrCall.td-WebAssembly Call codegen support -*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly Call operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+// TODO: addr64: These currently assume the callee address is 32-bit.
+
+let Defs = [ARGUMENTS] in {
+
+// Call sequence markers. These have an immediate that represents the amount of
+// stack space to allocate or free; it is used for varargs lowering.
+let Uses = [SP32, SP64], Defs = [SP32, SP64], isCodeGenOnly = 1 in {
+def ADJCALLSTACKDOWN : I<(outs), (ins i32imm:$amt, i32imm:$amt2),
+ [(WebAssemblycallseq_start timm:$amt, timm:$amt2)]>;
+def ADJCALLSTACKUP : I<(outs), (ins i32imm:$amt, i32imm:$amt2),
+ [(WebAssemblycallseq_end timm:$amt, timm:$amt2)]>;
+} // isCodeGenOnly = 1
+
+multiclass CALL<WebAssemblyRegClass vt, string prefix> {
+ def CALL_#vt : I<(outs vt:$dst), (ins function32_op:$callee, variable_ops),
+ [(set vt:$dst, (WebAssemblycall1 (i32 imm:$callee)))],
+ !strconcat(prefix, "call\t$dst, $callee"),
+ 0x10>;
+
+ let isCodeGenOnly = 1 in {
+ def PCALL_INDIRECT_#vt : I<(outs vt:$dst), (ins I32:$callee, variable_ops),
+ [(set vt:$dst, (WebAssemblycall1 I32:$callee))],
+ "PSEUDO CALL INDIRECT\t$callee">;
+ } // isCodeGenOnly = 1
+
+ def CALL_INDIRECT_#vt : I<(outs vt:$dst),
+ (ins TypeIndex:$type, i32imm:$flags, variable_ops),
+ [],
+ !strconcat(prefix, "call_indirect\t$dst"),
+ 0x11>;
+}
+
+multiclass SIMD_CALL<ValueType vt, string prefix> {
+ def CALL_#vt : SIMD_I<(outs V128:$dst), (ins function32_op:$callee, variable_ops),
+ [(set (vt V128:$dst),
+ (WebAssemblycall1 (i32 imm:$callee)))],
+ !strconcat(prefix, "call\t$dst, $callee"),
+ 0x10>;
+
+ let isCodeGenOnly = 1 in {
+ def PCALL_INDIRECT_#vt : SIMD_I<(outs V128:$dst),
+ (ins I32:$callee, variable_ops),
+ [(set (vt V128:$dst),
+ (WebAssemblycall1 I32:$callee))],
+ "PSEUDO CALL INDIRECT\t$callee">;
+ } // isCodeGenOnly = 1
+
+ def CALL_INDIRECT_#vt : SIMD_I<(outs V128:$dst),
+ (ins TypeIndex:$type, i32imm:$flags,
+ variable_ops),
+ [],
+ !strconcat(prefix, "call_indirect\t$dst"),
+ 0x11>;
+}
+
+let Uses = [SP32, SP64], isCall = 1 in {
+ defm : CALL<I32, "i32.">;
+ defm : CALL<I64, "i64.">;
+ defm : CALL<F32, "f32.">;
+ defm : CALL<F64, "f64.">;
+ defm : SIMD_CALL<v16i8, "i8x16.">;
+ defm : SIMD_CALL<v8i16, "i16x8.">;
+ defm : SIMD_CALL<v4i32, "i32x4.">;
+ defm : SIMD_CALL<v4f32, "f32x4.">;
+
+ def CALL_VOID : I<(outs), (ins function32_op:$callee, variable_ops),
+ [(WebAssemblycall0 (i32 imm:$callee))],
+ "call \t$callee", 0x10>;
+
+ let isCodeGenOnly = 1 in {
+ def PCALL_INDIRECT_VOID : I<(outs), (ins I32:$callee, variable_ops),
+ [(WebAssemblycall0 I32:$callee)],
+ "PSEUDO CALL INDIRECT\t$callee">;
+ } // isCodeGenOnly = 1
+
+ def CALL_INDIRECT_VOID : I<(outs),
+ (ins TypeIndex:$type, i32imm:$flags, variable_ops),
+ [],
+ "call_indirect\t", 0x11>;
+} // Uses = [SP32,SP64], isCall = 1
+
+} // Defs = [ARGUMENTS]
+
+// Patterns for matching a direct call to a global address.
+def : Pat<(i32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_I32 tglobaladdr:$callee)>;
+def : Pat<(i64 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_I64 tglobaladdr:$callee)>;
+def : Pat<(f32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_F32 tglobaladdr:$callee)>;
+def : Pat<(f64 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_F64 tglobaladdr:$callee)>;
+def : Pat<(v16i8 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_v16i8 tglobaladdr:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(v8i16 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_v8i16 tglobaladdr:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(v4i32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_v4i32 tglobaladdr:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(v4f32 (WebAssemblycall1 (WebAssemblywrapper tglobaladdr:$callee))),
+ (CALL_v4f32 tglobaladdr:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(WebAssemblycall0 (WebAssemblywrapper tglobaladdr:$callee)),
+ (CALL_VOID tglobaladdr:$callee)>;
+
+// Patterns for matching a direct call to an external symbol.
+def : Pat<(i32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_I32 texternalsym:$callee)>;
+def : Pat<(i64 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_I64 texternalsym:$callee)>;
+def : Pat<(f32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_F32 texternalsym:$callee)>;
+def : Pat<(f64 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_F64 texternalsym:$callee)>;
+def : Pat<(v16i8 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_v16i8 texternalsym:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(v8i16 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_v8i16 texternalsym:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(v4i32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_v4i32 texternalsym:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(v4f32 (WebAssemblycall1 (WebAssemblywrapper texternalsym:$callee))),
+ (CALL_v4f32 texternalsym:$callee)>, Requires<[HasSIMD128]>;
+def : Pat<(WebAssemblycall0 (WebAssemblywrapper texternalsym:$callee)),
+ (CALL_VOID texternalsym:$callee)>;
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td
new file mode 100644
index 000000000000..129794171464
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrControl.td
@@ -0,0 +1,133 @@
+//===- WebAssemblyInstrControl.td-WebAssembly control-flow ------*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly control-flow code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let Defs = [ARGUMENTS] in {
+
+let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
+// The condition operand is a boolean value which WebAssembly represents as i32.
+def BR_IF : I<(outs), (ins bb_op:$dst, I32:$cond),
+ [(brcond I32:$cond, bb:$dst)],
+ "br_if \t$dst, $cond", 0x0d>;
+let isCodeGenOnly = 1 in
+def BR_UNLESS : I<(outs), (ins bb_op:$dst, I32:$cond), []>;
+let isBarrier = 1 in {
+def BR : I<(outs), (ins bb_op:$dst),
+ [(br bb:$dst)],
+ "br \t$dst", 0x0c>;
+} // isBarrier = 1
+} // isBranch = 1, isTerminator = 1, hasCtrlDep = 1
+
+} // Defs = [ARGUMENTS]
+
+def : Pat<(brcond (i32 (setne I32:$cond, 0)), bb:$dst),
+ (BR_IF bb_op:$dst, I32:$cond)>;
+def : Pat<(brcond (i32 (seteq I32:$cond, 0)), bb:$dst),
+ (BR_UNLESS bb_op:$dst, I32:$cond)>;
+
+let Defs = [ARGUMENTS] in {
+
+// TODO: SelectionDAG's lowering insists on using a pointer as the index for
+// jump tables, so in practice we currently never use BR_TABLE_I64 in wasm32
+// mode.
+// Set TSFlags{0} to 1 to indicate that the variable_ops are immediates.
+// Set TSFlags{1} to 1 to indicate that the immediates represent labels.
+let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
+def BR_TABLE_I32 : I<(outs), (ins I32:$index, variable_ops),
+ [(WebAssemblybr_table I32:$index)],
+ "br_table \t$index", 0x0e> {
+ let TSFlags{0} = 1;
+ let TSFlags{1} = 1;
+}
+def BR_TABLE_I64 : I<(outs), (ins I64:$index, variable_ops),
+ [(WebAssemblybr_table I64:$index)],
+ "br_table \t$index"> {
+ let TSFlags{0} = 1;
+ let TSFlags{1} = 1;
+}
+} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1
+
+// Placemarkers to indicate the start or end of a block, loop, or try scope.
+// These use/clobber VALUE_STACK to prevent them from being moved into the
+// middle of an expression tree.
+let Uses = [VALUE_STACK], Defs = [VALUE_STACK] in {
+def BLOCK : I<(outs), (ins Signature:$sig), [], "block \t$sig", 0x02>;
+def LOOP : I<(outs), (ins Signature:$sig), [], "loop \t$sig", 0x03>;
+def TRY : I<(outs), (ins Signature:$sig), [], "try \t$sig", 0x06>;
+
+// END_BLOCK, END_LOOP, END_TRY, and END_FUNCTION are represented with the same
+// opcode in wasm.
+def END_BLOCK : I<(outs), (ins), [], "end_block", 0x0b>;
+def END_LOOP : I<(outs), (ins), [], "end_loop", 0x0b>;
+def END_TRY : I<(outs), (ins), [], "end_try", 0x0b>;
+let isTerminator = 1, isBarrier = 1 in
+def END_FUNCTION : I<(outs), (ins), [], "end_function", 0x0b>;
+} // Uses = [VALUE_STACK], Defs = [VALUE_STACK]
+
+multiclass RETURN<WebAssemblyRegClass vt> {
+ def RETURN_#vt : I<(outs), (ins vt:$val), [(WebAssemblyreturn vt:$val)],
+ "return \t$val", 0x0f>;
+ // Equivalent to RETURN_#vt, for use at the end of a function when wasm
+ // semantics return by falling off the end of the block.
+ let isCodeGenOnly = 1 in
+ def FALLTHROUGH_RETURN_#vt : I<(outs), (ins vt:$val), []>;
+}
+
+multiclass SIMD_RETURN<ValueType vt> {
+ def RETURN_#vt : SIMD_I<(outs), (ins V128:$val),
+ [(WebAssemblyreturn (vt V128:$val))],
+ "return \t$val", 0x0f>;
+ // Equivalent to RETURN_#vt, for use at the end of a function when wasm
+ // semantics return by falling off the end of the block.
+ let isCodeGenOnly = 1 in
+ def FALLTHROUGH_RETURN_#vt : SIMD_I<(outs), (ins V128:$val), []>;
+}
+
+let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
+
+let isReturn = 1 in {
+ defm : RETURN<I32>;
+ defm : RETURN<I64>;
+ defm : RETURN<F32>;
+ defm : RETURN<F64>;
+ defm : SIMD_RETURN<v16i8>;
+ defm : SIMD_RETURN<v8i16>;
+ defm : SIMD_RETURN<v4i32>;
+ defm : SIMD_RETURN<v4f32>;
+
+ def RETURN_VOID : I<(outs), (ins), [(WebAssemblyreturn)], "return", 0x0f>;
+
+ // This is to RETURN_VOID what FALLTHROUGH_RETURN_#vt is to RETURN_#vt.
+ let isCodeGenOnly = 1 in
+ def FALLTHROUGH_RETURN_VOID : I<(outs), (ins), []>;
+} // isReturn = 1
+
+def UNREACHABLE : I<(outs), (ins), [(trap)], "unreachable", 0x00>;
+
+def THROW_I32 : I<(outs), (ins i32imm:$tag, I32:$obj),
+ [(int_wasm_throw imm:$tag, I32:$obj)], "throw \t$tag, $obj",
+ 0x08>;
+def THROW_I64 : I<(outs), (ins i32imm:$tag, I64:$obj),
+ [(int_wasm_throw imm:$tag, I64:$obj)], "throw \t$tag, $obj",
+ 0x08>;
+def RETHROW : I<(outs), (ins i32imm:$rel_depth), [], "rethrow \t$rel_depth",
+ 0x09>;
+
+} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1
+
+} // Defs = [ARGUMENTS]
+
+// rethrow takes a relative depth as an argument; for C++, only a depth of 0 is
+// currently possible. Once other languages need depths other than 0, depths
+// will be computed in CFGStackify.
+def : Pat<(int_wasm_rethrow), (RETHROW 0)>;
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td
new file mode 100644
index 000000000000..426c2c802172
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrConv.td
@@ -0,0 +1,185 @@
+//===-- WebAssemblyInstrConv.td-WebAssembly Conversion support -*- tablegen -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly datatype conversions, truncations, reinterpretations,
+/// promotions, and demotions operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let Defs = [ARGUMENTS] in {
+
+def I32_WRAP_I64 : I<(outs I32:$dst), (ins I64:$src),
+ [(set I32:$dst, (trunc I64:$src))],
+ "i32.wrap/i64\t$dst, $src", 0xa7>;
+
+def I64_EXTEND_S_I32 : I<(outs I64:$dst), (ins I32:$src),
+ [(set I64:$dst, (sext I32:$src))],
+ "i64.extend_s/i32\t$dst, $src", 0xac>;
+def I64_EXTEND_U_I32 : I<(outs I64:$dst), (ins I32:$src),
+ [(set I64:$dst, (zext I32:$src))],
+ "i64.extend_u/i32\t$dst, $src", 0xad>;
+
+let Predicates = [HasAtomics] in {
+def I32_EXTEND8_S_I32 : I<(outs I32:$dst), (ins I32:$src),
+ [(set I32:$dst, (sext_inreg I32:$src, i8))],
+ "i32.extend8_s\t$dst, $src", 0xc0>;
+def I32_EXTEND16_S_I32 : I<(outs I32:$dst), (ins I32:$src),
+ [(set I32:$dst, (sext_inreg I32:$src, i16))],
+ "i32.extend16_s\t$dst, $src", 0xc1>;
+def I64_EXTEND8_S_I64 : I<(outs I64:$dst), (ins I64:$src),
+ [(set I64:$dst, (sext_inreg I64:$src, i8))],
+ "i64.extend8_s\t$dst, $src", 0xc2>;
+def I64_EXTEND16_S_I64 : I<(outs I64:$dst), (ins I64:$src),
+ [(set I64:$dst, (sext_inreg I64:$src, i16))],
+ "i64.extend16_s\t$dst, $src", 0xc3>;
+def I64_EXTEND32_S_I64 : I<(outs I64:$dst), (ins I64:$src),
+ [(set I64:$dst, (sext_inreg I64:$src, i32))],
+ "i64.extend32_s\t$dst, $src", 0xc4>;
+} // Predicates = [HasAtomics]
+
+} // defs = [ARGUMENTS]
+
+// Expand a "don't care" extend into zero-extend (chosen over sign-extend
+// somewhat arbitrarily, although it favors popular hardware architectures
+// and is conceptually a simpler operation).
+def : Pat<(i64 (anyext I32:$src)), (I64_EXTEND_U_I32 I32:$src)>;
+
+let Defs = [ARGUMENTS] in {
+
+// Conversion from floating point to integer instructions which don't trap on
+// overflow or invalid.
+def I32_TRUNC_S_SAT_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [(set I32:$dst, (fp_to_sint F32:$src))],
+ "i32.trunc_s:sat/f32\t$dst, $src", 0xfc00>,
+ Requires<[HasNontrappingFPToInt]>;
+def I32_TRUNC_U_SAT_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [(set I32:$dst, (fp_to_uint F32:$src))],
+ "i32.trunc_u:sat/f32\t$dst, $src", 0xfc01>,
+ Requires<[HasNontrappingFPToInt]>;
+def I64_TRUNC_S_SAT_F32 : I<(outs I64:$dst), (ins F32:$src),
+ [(set I64:$dst, (fp_to_sint F32:$src))],
+ "i64.trunc_s:sat/f32\t$dst, $src", 0xfc04>,
+ Requires<[HasNontrappingFPToInt]>;
+def I64_TRUNC_U_SAT_F32 : I<(outs I64:$dst), (ins F32:$src),
+ [(set I64:$dst, (fp_to_uint F32:$src))],
+ "i64.trunc_u:sat/f32\t$dst, $src", 0xfc05>,
+ Requires<[HasNontrappingFPToInt]>;
+def I32_TRUNC_S_SAT_F64 : I<(outs I32:$dst), (ins F64:$src),
+ [(set I32:$dst, (fp_to_sint F64:$src))],
+ "i32.trunc_s:sat/f64\t$dst, $src", 0xfc02>,
+ Requires<[HasNontrappingFPToInt]>;
+def I32_TRUNC_U_SAT_F64 : I<(outs I32:$dst), (ins F64:$src),
+ [(set I32:$dst, (fp_to_uint F64:$src))],
+ "i32.trunc_u:sat/f64\t$dst, $src", 0xfc03>,
+ Requires<[HasNontrappingFPToInt]>;
+def I64_TRUNC_S_SAT_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [(set I64:$dst, (fp_to_sint F64:$src))],
+ "i64.trunc_s:sat/f64\t$dst, $src", 0xfc06>,
+ Requires<[HasNontrappingFPToInt]>;
+def I64_TRUNC_U_SAT_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [(set I64:$dst, (fp_to_uint F64:$src))],
+ "i64.trunc_u:sat/f64\t$dst, $src", 0xfc07>,
+ Requires<[HasNontrappingFPToInt]>;
+
+// Conversion from floating point to integer pseudo-instructions which don't
+// trap on overflow or invalid.
+let usesCustomInserter = 1, isCodeGenOnly = 1 in {
+def FP_TO_SINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [(set I32:$dst, (fp_to_sint F32:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_UINT_I32_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [(set I32:$dst, (fp_to_uint F32:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_SINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src),
+ [(set I64:$dst, (fp_to_sint F32:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_UINT_I64_F32 : I<(outs I64:$dst), (ins F32:$src),
+ [(set I64:$dst, (fp_to_uint F32:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_SINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src),
+ [(set I32:$dst, (fp_to_sint F64:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_UINT_I32_F64 : I<(outs I32:$dst), (ins F64:$src),
+ [(set I32:$dst, (fp_to_uint F64:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_SINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [(set I64:$dst, (fp_to_sint F64:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+def FP_TO_UINT_I64_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [(set I64:$dst, (fp_to_uint F64:$src))], "", 0>,
+ Requires<[NotHasNontrappingFPToInt]>;
+} // usesCustomInserter, isCodeGenOnly = 1
+
+// Conversion from floating point to integer traps on overflow and invalid.
+let hasSideEffects = 1 in {
+def I32_TRUNC_S_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [], "i32.trunc_s/f32\t$dst, $src", 0xa8>;
+def I32_TRUNC_U_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [], "i32.trunc_u/f32\t$dst, $src", 0xa9>;
+def I64_TRUNC_S_F32 : I<(outs I64:$dst), (ins F32:$src),
+ [], "i64.trunc_s/f32\t$dst, $src", 0xae>;
+def I64_TRUNC_U_F32 : I<(outs I64:$dst), (ins F32:$src),
+ [], "i64.trunc_u/f32\t$dst, $src", 0xaf>;
+def I32_TRUNC_S_F64 : I<(outs I32:$dst), (ins F64:$src),
+ [], "i32.trunc_s/f64\t$dst, $src", 0xaa>;
+def I32_TRUNC_U_F64 : I<(outs I32:$dst), (ins F64:$src),
+ [], "i32.trunc_u/f64\t$dst, $src", 0xab>;
+def I64_TRUNC_S_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [], "i64.trunc_s/f64\t$dst, $src", 0xb0>;
+def I64_TRUNC_U_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [], "i64.trunc_u/f64\t$dst, $src", 0xb1>;
+} // hasSideEffects = 1
+
+def F32_CONVERT_S_I32 : I<(outs F32:$dst), (ins I32:$src),
+ [(set F32:$dst, (sint_to_fp I32:$src))],
+ "f32.convert_s/i32\t$dst, $src", 0xb2>;
+def F32_CONVERT_U_I32 : I<(outs F32:$dst), (ins I32:$src),
+ [(set F32:$dst, (uint_to_fp I32:$src))],
+ "f32.convert_u/i32\t$dst, $src", 0xb3>;
+def F64_CONVERT_S_I32 : I<(outs F64:$dst), (ins I32:$src),
+ [(set F64:$dst, (sint_to_fp I32:$src))],
+ "f64.convert_s/i32\t$dst, $src", 0xb7>;
+def F64_CONVERT_U_I32 : I<(outs F64:$dst), (ins I32:$src),
+ [(set F64:$dst, (uint_to_fp I32:$src))],
+ "f64.convert_u/i32\t$dst, $src", 0xb8>;
+def F32_CONVERT_S_I64 : I<(outs F32:$dst), (ins I64:$src),
+ [(set F32:$dst, (sint_to_fp I64:$src))],
+ "f32.convert_s/i64\t$dst, $src", 0xb4>;
+def F32_CONVERT_U_I64 : I<(outs F32:$dst), (ins I64:$src),
+ [(set F32:$dst, (uint_to_fp I64:$src))],
+ "f32.convert_u/i64\t$dst, $src", 0xb5>;
+def F64_CONVERT_S_I64 : I<(outs F64:$dst), (ins I64:$src),
+ [(set F64:$dst, (sint_to_fp I64:$src))],
+ "f64.convert_s/i64\t$dst, $src", 0xb9>;
+def F64_CONVERT_U_I64 : I<(outs F64:$dst), (ins I64:$src),
+ [(set F64:$dst, (uint_to_fp I64:$src))],
+ "f64.convert_u/i64\t$dst, $src", 0xba>;
+
+def F64_PROMOTE_F32 : I<(outs F64:$dst), (ins F32:$src),
+ [(set F64:$dst, (fpextend F32:$src))],
+ "f64.promote/f32\t$dst, $src", 0xbb>;
+def F32_DEMOTE_F64 : I<(outs F32:$dst), (ins F64:$src),
+ [(set F32:$dst, (fpround F64:$src))],
+ "f32.demote/f64\t$dst, $src", 0xb6>;
+
+def I32_REINTERPRET_F32 : I<(outs I32:$dst), (ins F32:$src),
+ [(set I32:$dst, (bitconvert F32:$src))],
+ "i32.reinterpret/f32\t$dst, $src", 0xbc>;
+def F32_REINTERPRET_I32 : I<(outs F32:$dst), (ins I32:$src),
+ [(set F32:$dst, (bitconvert I32:$src))],
+ "f32.reinterpret/i32\t$dst, $src", 0xbe>;
+def I64_REINTERPRET_F64 : I<(outs I64:$dst), (ins F64:$src),
+ [(set I64:$dst, (bitconvert F64:$src))],
+ "i64.reinterpret/f64\t$dst, $src", 0xbd>;
+def F64_REINTERPRET_I64 : I<(outs F64:$dst), (ins I64:$src),
+ [(set F64:$dst, (bitconvert I64:$src))],
+ "f64.reinterpret/i64\t$dst, $src", 0xbf>;
+
+} // Defs = [ARGUMENTS]
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
new file mode 100644
index 000000000000..03c9c1f8d5c0
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFloat.td
@@ -0,0 +1,101 @@
+// WebAssemblyInstrFloat.td-WebAssembly Float codegen support ---*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly Floating-point operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let Defs = [ARGUMENTS] in {
+
+let isCommutable = 1 in
+defm ADD : BinaryFP<fadd, "add ", 0x92, 0xa0>;
+defm SUB : BinaryFP<fsub, "sub ", 0x93, 0xa1>;
+let isCommutable = 1 in
+defm MUL : BinaryFP<fmul, "mul ", 0x94, 0xa2>;
+defm DIV : BinaryFP<fdiv, "div ", 0x95, 0xa3>;
+defm SQRT : UnaryFP<fsqrt, "sqrt", 0x91, 0x9f>;
+
+defm ABS : UnaryFP<fabs, "abs ", 0x8b, 0x99>;
+defm NEG : UnaryFP<fneg, "neg ", 0x8c, 0x9a>;
+defm COPYSIGN : BinaryFP<fcopysign, "copysign", 0x98, 0xa6>;
+
+let isCommutable = 1 in {
+defm MIN : BinaryFP<fminnan, "min ", 0x96, 0xa4>;
+defm MAX : BinaryFP<fmaxnan, "max ", 0x97, 0xa5>;
+} // isCommutable = 1
+
+defm CEIL : UnaryFP<fceil, "ceil", 0x8d, 0x9b>;
+defm FLOOR : UnaryFP<ffloor, "floor", 0x8e, 0x9c>;
+defm TRUNC : UnaryFP<ftrunc, "trunc", 0x8f, 0x9d>;
+defm NEAREST : UnaryFP<fnearbyint, "nearest", 0x90, 0x9e>;
+
+} // Defs = [ARGUMENTS]
+
+// DAGCombine oddly folds casts into the rhs of copysign. Unfold them.
+def : Pat<(fcopysign F64:$lhs, F32:$rhs),
+ (COPYSIGN_F64 F64:$lhs, (F64_PROMOTE_F32 F32:$rhs))>;
+def : Pat<(fcopysign F32:$lhs, F64:$rhs),
+ (COPYSIGN_F32 F32:$lhs, (F32_DEMOTE_F64 F64:$rhs))>;
+
+// WebAssembly doesn't expose inexact exceptions, so map frint to fnearbyint.
+def : Pat<(frint f32:$src), (NEAREST_F32 f32:$src)>;
+def : Pat<(frint f64:$src), (NEAREST_F64 f64:$src)>;
+
+let Defs = [ARGUMENTS] in {
+
+let isCommutable = 1 in {
+defm EQ : ComparisonFP<SETOEQ, "eq ", 0x5b, 0x61>;
+defm NE : ComparisonFP<SETUNE, "ne ", 0x5c, 0x62>;
+} // isCommutable = 1
+defm LT : ComparisonFP<SETOLT, "lt ", 0x5d, 0x63>;
+defm LE : ComparisonFP<SETOLE, "le ", 0x5f, 0x65>;
+defm GT : ComparisonFP<SETOGT, "gt ", 0x5e, 0x64>;
+defm GE : ComparisonFP<SETOGE, "ge ", 0x60, 0x66>;
+
+} // Defs = [ARGUMENTS]
+
+// "Don't care" floating-point comparisons, where the result on NaN inputs is
+// unspecified; map them to the comparisons defined above.
+def : Pat<(seteq f32:$lhs, f32:$rhs), (EQ_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setne f32:$lhs, f32:$rhs), (NE_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setlt f32:$lhs, f32:$rhs), (LT_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setle f32:$lhs, f32:$rhs), (LE_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setgt f32:$lhs, f32:$rhs), (GT_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(setge f32:$lhs, f32:$rhs), (GE_F32 f32:$lhs, f32:$rhs)>;
+def : Pat<(seteq f64:$lhs, f64:$rhs), (EQ_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setne f64:$lhs, f64:$rhs), (NE_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setlt f64:$lhs, f64:$rhs), (LT_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setle f64:$lhs, f64:$rhs), (LE_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setgt f64:$lhs, f64:$rhs), (GT_F64 f64:$lhs, f64:$rhs)>;
+def : Pat<(setge f64:$lhs, f64:$rhs), (GE_F64 f64:$lhs, f64:$rhs)>;
+
+let Defs = [ARGUMENTS] in {
+
+def SELECT_F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs, I32:$cond),
+ [(set F32:$dst, (select I32:$cond, F32:$lhs, F32:$rhs))],
+ "f32.select\t$dst, $lhs, $rhs, $cond", 0x1b>;
+def SELECT_F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs, I32:$cond),
+ [(set F64:$dst, (select I32:$cond, F64:$lhs, F64:$rhs))],
+ "f64.select\t$dst, $lhs, $rhs, $cond", 0x1b>;
+
+} // Defs = [ARGUMENTS]
+
+// ISD::SELECT requires its operand to conform to getBooleanContents, but
+// WebAssembly's select interprets any non-zero value as true, so we can fold
+// a setne with 0 into a select.
+def : Pat<(select (i32 (setne I32:$cond, 0)), F32:$lhs, F32:$rhs),
+ (SELECT_F32 F32:$lhs, F32:$rhs, I32:$cond)>;
+def : Pat<(select (i32 (setne I32:$cond, 0)), F64:$lhs, F64:$rhs),
+ (SELECT_F64 F64:$lhs, F64:$rhs, I32:$cond)>;
+
+// And again, this time with seteq instead of setne and the arms reversed.
+def : Pat<(select (i32 (seteq I32:$cond, 0)), F32:$lhs, F32:$rhs),
+ (SELECT_F32 F32:$rhs, F32:$lhs, I32:$cond)>;
+def : Pat<(select (i32 (seteq I32:$cond, 0)), F64:$lhs, F64:$rhs),
+ (SELECT_F64 F64:$rhs, F64:$lhs, I32:$cond)>;
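+
+// For example (illustrative source-level view): "x != 0 ? a : b" with float
+// arms selects directly on the i32 value x via the first pattern above; no
+// separate comparison instruction is materialized.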
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
new file mode 100644
index 000000000000..4f41fcc232e9
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrFormats.td
@@ -0,0 +1,106 @@
+//=- WebAssemblyInstrFormats.td - WebAssembly Instr. Formats -*- tablegen -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly instruction format definitions.
+///
+//===----------------------------------------------------------------------===//
+
+// WebAssembly Instruction Format.
+class WebAssemblyInst<bits<32> inst, string asmstr> : Instruction {
+ field bits<32> Inst = inst; // Instruction encoding.
+ let Namespace = "WebAssembly";
+ let Pattern = [];
+ let AsmString = asmstr;
+}
+
+// Normal instructions.
+class I<dag oops, dag iops, list<dag> pattern, string asmstr = "", bits<32> inst = -1>
+ : WebAssemblyInst<inst, asmstr> {
+ dag OutOperandList = oops;
+ dag InOperandList = iops;
+ let Pattern = pattern;
+}
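+
+// Illustrative use of the format class above (a hypothetical standalone
+// example; the real instructions are defined via the multiclasses below and
+// in the per-category .td files):
+//   def ADD_I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs),
+//                   [(set I32:$dst, (add I32:$lhs, I32:$rhs))],
+//                   "i32.add\t$dst, $lhs, $rhs", 0x6a>;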
+
+class SIMD_I<dag oops, dag iops, list<dag> pattern,
+ string asmstr = "", bits<32> inst = -1>
+ : I<oops, iops, pattern, asmstr, inst>, Requires<[HasSIMD128]>;
+
+class ATOMIC_I<dag oops, dag iops, list<dag> pattern,
+ string asmstr = "", bits<32> inst = -1>
+ : I<oops, iops, pattern, asmstr, inst>, Requires<[HasAtomics]>;
+
+// Unary and binary instructions for the local types that WebAssembly supports.
+multiclass UnaryInt<SDNode node, string name, bits<32> i32Inst, bits<32> i64Inst> {
+ def _I32 : I<(outs I32:$dst), (ins I32:$src),
+ [(set I32:$dst, (node I32:$src))],
+ !strconcat("i32.", !strconcat(name, "\t$dst, $src")), i32Inst>;
+ def _I64 : I<(outs I64:$dst), (ins I64:$src),
+ [(set I64:$dst, (node I64:$src))],
+ !strconcat("i64.", !strconcat(name, "\t$dst, $src")), i64Inst>;
+}
+multiclass BinaryInt<SDNode node, string name, bits<32> i32Inst, bits<32> i64Inst> {
+ def _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs),
+ [(set I32:$dst, (node I32:$lhs, I32:$rhs))],
+ !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")), i32Inst>;
+ def _I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs),
+ [(set I64:$dst, (node I64:$lhs, I64:$rhs))],
+ !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")), i64Inst>;
+}
+multiclass UnaryFP<SDNode node, string name, bits<32> f32Inst, bits<32> f64Inst> {
+ def _F32 : I<(outs F32:$dst), (ins F32:$src),
+ [(set F32:$dst, (node F32:$src))],
+ !strconcat("f32.", !strconcat(name, "\t$dst, $src")), f32Inst>;
+ def _F64 : I<(outs F64:$dst), (ins F64:$src),
+ [(set F64:$dst, (node F64:$src))],
+ !strconcat("f64.", !strconcat(name, "\t$dst, $src")), f64Inst>;
+}
+multiclass BinaryFP<SDNode node, string name, bits<32> f32Inst, bits<32> f64Inst> {
+ def _F32 : I<(outs F32:$dst), (ins F32:$lhs, F32:$rhs),
+ [(set F32:$dst, (node F32:$lhs, F32:$rhs))],
+ !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")), f32Inst>;
+ def _F64 : I<(outs F64:$dst), (ins F64:$lhs, F64:$rhs),
+ [(set F64:$dst, (node F64:$lhs, F64:$rhs))],
+ !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")), f64Inst>;
+}
+multiclass SIMDBinary<SDNode node, SDNode fnode, string name> {
+ def _I8x16 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ [(set (v16i8 V128:$dst), (node V128:$lhs, V128:$rhs))],
+ !strconcat("i8x16.", !strconcat(name, "\t$dst, $lhs, $rhs"))>;
+ def _I16x8 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ [(set (v8i16 V128:$dst), (node V128:$lhs, V128:$rhs))],
+ !strconcat("i16x8.", !strconcat(name, "\t$dst, $lhs, $rhs"))>;
+ def _I32x4 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ [(set (v4i32 V128:$dst), (node V128:$lhs, V128:$rhs))],
+ !strconcat("i32x4.", !strconcat(name, "\t$dst, $lhs, $rhs"))>;
+ def _F32x4 : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs),
+ [(set (v4f32 V128:$dst), (fnode V128:$lhs, V128:$rhs))],
+ !strconcat("f32x4.", !strconcat(name, "\t$dst, $lhs, $rhs"))>;
+}
+multiclass ComparisonInt<CondCode cond, string name, bits<32> i32Inst, bits<32> i64Inst> {
+ def _I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs),
+ [(set I32:$dst, (setcc I32:$lhs, I32:$rhs, cond))],
+ !strconcat("i32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ i32Inst>;
+ def _I64 : I<(outs I32:$dst), (ins I64:$lhs, I64:$rhs),
+ [(set I32:$dst, (setcc I64:$lhs, I64:$rhs, cond))],
+ !strconcat("i64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ i64Inst>;
+}
+multiclass ComparisonFP<CondCode cond, string name, bits<32> f32Inst, bits<32> f64Inst> {
+ def _F32 : I<(outs I32:$dst), (ins F32:$lhs, F32:$rhs),
+ [(set I32:$dst, (setcc F32:$lhs, F32:$rhs, cond))],
+ !strconcat("f32.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ f32Inst>;
+ def _F64 : I<(outs I32:$dst), (ins F64:$lhs, F64:$rhs),
+ [(set I32:$dst, (setcc F64:$lhs, F64:$rhs, cond))],
+ !strconcat("f64.", !strconcat(name, "\t$dst, $lhs, $rhs")),
+ f64Inst>;
+}
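+
+// As a sketch of how these multiclasses expand: a use such as
+//   defm EQ : ComparisonFP<SETOEQ, "eq ", 0x5b, 0x61>;
+// (from WebAssemblyInstrFloat.td) defines EQ_F32 and EQ_F64, each producing
+// an I32 result, with asm strings "f32.eq"/"f64.eq" and opcodes 0x5b/0x61.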
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
new file mode 100644
index 000000000000..8846952e5af4
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
@@ -0,0 +1,202 @@
+//===-- WebAssemblyInstrInfo.cpp - WebAssembly Instruction Information ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the WebAssembly implementation of the
+/// TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyInstrInfo.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-instr-info"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "WebAssemblyGenInstrInfo.inc"
+
+WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI)
+ : WebAssemblyGenInstrInfo(WebAssembly::ADJCALLSTACKDOWN,
+ WebAssembly::ADJCALLSTACKUP),
+ RI(STI.getTargetTriple()) {}
+
+bool WebAssemblyInstrInfo::isReallyTriviallyReMaterializable(
+ const MachineInstr &MI, AliasAnalysis *AA) const {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CONST_I32:
+ case WebAssembly::CONST_I64:
+ case WebAssembly::CONST_F32:
+ case WebAssembly::CONST_F64:
+ // isReallyTriviallyReMaterializableGeneric misses these because of the
+ // ARGUMENTS implicit def, so we manually override it here.
+ return true;
+ default:
+ return false;
+ }
+}
+
+void WebAssemblyInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
+ // This method is called by post-RA expansion, which expects only physical
+ // registers to exist. However, we need to handle both physical and virtual
+ // registers here.
+ auto &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterClass *RC =
+ TargetRegisterInfo::isVirtualRegister(DestReg)
+ ? MRI.getRegClass(DestReg)
+ : MRI.getTargetRegisterInfo()->getMinimalPhysRegClass(DestReg);
+
+ unsigned CopyOpcode;
+ if (RC == &WebAssembly::I32RegClass)
+ CopyOpcode = WebAssembly::COPY_I32;
+ else if (RC == &WebAssembly::I64RegClass)
+ CopyOpcode = WebAssembly::COPY_I64;
+ else if (RC == &WebAssembly::F32RegClass)
+ CopyOpcode = WebAssembly::COPY_F32;
+ else if (RC == &WebAssembly::F64RegClass)
+ CopyOpcode = WebAssembly::COPY_F64;
+ else
+ llvm_unreachable("Unexpected register class");
+
+ BuildMI(MBB, I, DL, get(CopyOpcode), DestReg)
+ .addReg(SrcReg, KillSrc ? RegState::Kill : 0);
+}
+
+MachineInstr *
+WebAssemblyInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const {
+ // If the operands are stackified, we can't reorder them.
+ WebAssemblyFunctionInfo &MFI =
+ *MI.getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
+ if (MFI.isVRegStackified(MI.getOperand(OpIdx1).getReg()) ||
+ MFI.isVRegStackified(MI.getOperand(OpIdx2).getReg()))
+ return nullptr;
+
+ // Otherwise use the default implementation.
+ return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+}
+
+// Branch analysis.
+bool WebAssemblyInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool /*AllowModify*/) const {
+ bool HaveCond = false;
+ for (MachineInstr &MI : MBB.terminators()) {
+ switch (MI.getOpcode()) {
+ default:
+ // Unhandled instruction; bail out.
+ return true;
+ case WebAssembly::BR_IF:
+ if (HaveCond)
+ return true;
+ // If we're running after CFGStackify, we can't optimize further.
+ if (!MI.getOperand(0).isMBB())
+ return true;
+ Cond.push_back(MachineOperand::CreateImm(true));
+ Cond.push_back(MI.getOperand(1));
+ TBB = MI.getOperand(0).getMBB();
+ HaveCond = true;
+ break;
+ case WebAssembly::BR_UNLESS:
+ if (HaveCond)
+ return true;
+ // If we're running after CFGStackify, we can't optimize further.
+ if (!MI.getOperand(0).isMBB())
+ return true;
+ Cond.push_back(MachineOperand::CreateImm(false));
+ Cond.push_back(MI.getOperand(1));
+ TBB = MI.getOperand(0).getMBB();
+ HaveCond = true;
+ break;
+ case WebAssembly::BR:
+ // If we're running after CFGStackify, we can't optimize further.
+ if (!MI.getOperand(0).isMBB())
+ return true;
+ if (!HaveCond)
+ TBB = MI.getOperand(0).getMBB();
+ else
+ FBB = MI.getOperand(0).getMBB();
+ break;
+ }
+ if (MI.isBarrier())
+ break;
+ }
+
+ return false;
+}
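+
+// The condition built by analyzeBranch is a two-operand encoding: Cond[0] is
+// an immediate flag (true for BR_IF, false for BR_UNLESS) and Cond[1] is the
+// i32 condition register. insertBranch and reverseBranchCondition below
+// consume the same encoding.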
+
+unsigned WebAssemblyInstrInfo::removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved) const {
+ assert(!BytesRemoved && "code size not handled");
+
+ MachineBasicBlock::instr_iterator I = MBB.instr_end();
+ unsigned Count = 0;
+
+ while (I != MBB.instr_begin()) {
+ --I;
+ if (I->isDebugValue())
+ continue;
+ if (!I->isTerminator())
+ break;
+ // Remove the branch.
+ I->eraseFromParent();
+ I = MBB.instr_end();
+ ++Count;
+ }
+
+ return Count;
+}
+
+unsigned WebAssemblyInstrInfo::insertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL,
+ int *BytesAdded) const {
+ assert(!BytesAdded && "code size not handled");
+
+ if (Cond.empty()) {
+ if (!TBB)
+ return 0;
+
+ BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(TBB);
+ return 1;
+ }
+
+ assert(Cond.size() == 2 && "Expected a flag and a successor block");
+
+ if (Cond[0].getImm()) {
+ BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]);
+ } else {
+ BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)).addMBB(TBB).add(Cond[1]);
+ }
+ if (!FBB)
+ return 1;
+
+ BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(FBB);
+ return 2;
+}
+
+bool WebAssemblyInstrInfo::reverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 2 && "Expected a flag and a successor block");
+ Cond.front() = MachineOperand::CreateImm(!Cond.front().getImm());
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
new file mode 100644
index 000000000000..eb74106336ed
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.h
@@ -0,0 +1,63 @@
+//=- WebAssemblyInstrInfo.h - WebAssembly Instruction Information -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the WebAssembly implementation of the
+/// TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYINSTRINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYINSTRINFO_H
+
+#include "WebAssemblyRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "WebAssemblyGenInstrInfo.inc"
+
+namespace llvm {
+
+class WebAssemblySubtarget;
+
+class WebAssemblyInstrInfo final : public WebAssemblyGenInstrInfo {
+ const WebAssemblyRegisterInfo RI;
+
+public:
+ explicit WebAssemblyInstrInfo(const WebAssemblySubtarget &STI);
+
+ const WebAssemblyRegisterInfo &getRegisterInfo() const { return RI; }
+
+ bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
+ AliasAnalysis *AA) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+ MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const override;
+
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override;
+ unsigned removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved = nullptr) const override;
+ unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL,
+ int *BytesAdded = nullptr) const override;
+ bool
+ reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
new file mode 100644
index 000000000000..f8d311ac3b00
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.td
@@ -0,0 +1,251 @@
+// WebAssemblyInstrInfo.td-Describe the WebAssembly Instructions-*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly Instruction definitions.
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Instruction Predicate Definitions.
+//===----------------------------------------------------------------------===//
+
+def HasAddr32 : Predicate<"!Subtarget->hasAddr64()">;
+def HasAddr64 : Predicate<"Subtarget->hasAddr64()">;
+def HasSIMD128 : Predicate<"Subtarget->hasSIMD128()">,
+ AssemblerPredicate<"FeatureSIMD128", "simd128">;
+def HasAtomics : Predicate<"Subtarget->hasAtomics()">,
+ AssemblerPredicate<"FeatureAtomics", "atomics">;
+def HasNontrappingFPToInt :
+ Predicate<"Subtarget->hasNontrappingFPToInt()">,
+ AssemblerPredicate<"FeatureNontrappingFPToInt",
+ "nontrapping-fptoint">;
+def NotHasNontrappingFPToInt :
+ Predicate<"!Subtarget->hasNontrappingFPToInt()">,
+ AssemblerPredicate<"!FeatureNontrappingFPToInt",
+ "nontrapping-fptoint">;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly-specific DAG Node Types.
+//===----------------------------------------------------------------------===//
+
+def SDT_WebAssemblyCallSeqStart : SDCallSeqStart<[SDTCisVT<0, iPTR>,
+ SDTCisVT<1, iPTR>]>;
+def SDT_WebAssemblyCallSeqEnd :
+ SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
+def SDT_WebAssemblyCall0 : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
+def SDT_WebAssemblyCall1 : SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>;
+def SDT_WebAssemblyBrTable : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
+def SDT_WebAssemblyArgument : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>;
+def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>;
+def SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
+ SDTCisPtrTy<0>]>;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly-specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
+def WebAssemblycallseq_start :
+ SDNode<"ISD::CALLSEQ_START", SDT_WebAssemblyCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def WebAssemblycallseq_end :
+ SDNode<"ISD::CALLSEQ_END", SDT_WebAssemblyCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def WebAssemblycall0 : SDNode<"WebAssemblyISD::CALL0",
+ SDT_WebAssemblyCall0,
+ [SDNPHasChain, SDNPVariadic]>;
+def WebAssemblycall1 : SDNode<"WebAssemblyISD::CALL1",
+ SDT_WebAssemblyCall1,
+ [SDNPHasChain, SDNPVariadic]>;
+def WebAssemblybr_table : SDNode<"WebAssemblyISD::BR_TABLE",
+ SDT_WebAssemblyBrTable,
+ [SDNPHasChain, SDNPVariadic]>;
+def WebAssemblyargument : SDNode<"WebAssemblyISD::ARGUMENT",
+ SDT_WebAssemblyArgument>;
+def WebAssemblyreturn : SDNode<"WebAssemblyISD::RETURN",
+ SDT_WebAssemblyReturn, [SDNPHasChain]>;
+def WebAssemblywrapper : SDNode<"WebAssemblyISD::Wrapper",
+ SDT_WebAssemblyWrapper>;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly-specific Operands.
+//===----------------------------------------------------------------------===//
+
+let OperandNamespace = "WebAssembly" in {
+
+let OperandType = "OPERAND_BASIC_BLOCK" in
+def bb_op : Operand<OtherVT>;
+
+let OperandType = "OPERAND_LOCAL" in
+def local_op : Operand<i32>;
+
+let OperandType = "OPERAND_GLOBAL" in
+def global_op : Operand<i32>;
+
+let OperandType = "OPERAND_I32IMM" in
+def i32imm_op : Operand<i32>;
+
+let OperandType = "OPERAND_I64IMM" in
+def i64imm_op : Operand<i64>;
+
+let OperandType = "OPERAND_F32IMM" in
+def f32imm_op : Operand<f32>;
+
+let OperandType = "OPERAND_F64IMM" in
+def f64imm_op : Operand<f64>;
+
+let OperandType = "OPERAND_FUNCTION32" in
+def function32_op : Operand<i32>;
+
+let OperandType = "OPERAND_OFFSET32" in
+def offset32_op : Operand<i32>;
+
+let OperandType = "OPERAND_P2ALIGN" in {
+def P2Align : Operand<i32> {
+ let PrintMethod = "printWebAssemblyP2AlignOperand";
+}
+} // OperandType = "OPERAND_P2ALIGN"
+
+let OperandType = "OPERAND_SIGNATURE" in {
+def Signature : Operand<i32> {
+ let PrintMethod = "printWebAssemblySignatureOperand";
+}
+} // OperandType = "OPERAND_SIGNATURE"
+
+let OperandType = "OPERAND_TYPEINDEX" in
+def TypeIndex : Operand<i32>;
+
+} // OperandNamespace = "WebAssembly"
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Instruction Format Definitions.
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyInstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// Additional instructions.
+//===----------------------------------------------------------------------===//
+
+multiclass ARGUMENT<WebAssemblyRegClass vt> {
+ let hasSideEffects = 1, Uses = [ARGUMENTS], isCodeGenOnly = 1 in
+ def ARGUMENT_#vt : I<(outs vt:$res), (ins i32imm:$argno),
+ [(set vt:$res, (WebAssemblyargument timm:$argno))]>;
+}
+multiclass SIMD_ARGUMENT<ValueType vt> {
+ let hasSideEffects = 1, Uses = [ARGUMENTS], isCodeGenOnly = 1 in
+ def ARGUMENT_#vt : SIMD_I<(outs V128:$res), (ins i32imm:$argno),
+ [(set (vt V128:$res),
+ (WebAssemblyargument timm:$argno))]>;
+}
+defm : ARGUMENT<I32>;
+defm : ARGUMENT<I64>;
+defm : ARGUMENT<F32>;
+defm : ARGUMENT<F64>;
+defm : SIMD_ARGUMENT<v16i8>;
+defm : SIMD_ARGUMENT<v8i16>;
+defm : SIMD_ARGUMENT<v4i32>;
+defm : SIMD_ARGUMENT<v4f32>;
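+
+// ARGUMENT instructions represent incoming function arguments. Marking them
+// hasSideEffects and having them use the ARGUMENTS pseudo-register (which
+// ordinary instructions define) keeps them ordered at the top of the entry
+// block; the ArgumentMove pass maintains this placement.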
+
+let Defs = [ARGUMENTS] in {
+
+// get_local and set_local are not generated by instruction selection; they
+// are implied by virtual register uses and defs.
+multiclass LOCAL<WebAssemblyRegClass vt> {
+let hasSideEffects = 0 in {
+ // COPY is not an actual instruction in wasm, but since we allow get_local and
+ // set_local to be implicit during most of codegen, we can have a COPY which
+ // is actually a no-op because all the work is done in the implied get_local
+ // and set_local. COPYs are eliminated (and replaced with
+ // get_local/set_local) in the ExplicitLocals pass.
+ let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in
+ def COPY_#vt : I<(outs vt:$res), (ins vt:$src), [], "copy_local\t$res, $src">;
+
+ // TEE is similar to COPY, but writes two copies of its result. Typically
+ // this would be used to stackify one result and write the other result to a
+ // local.
+ let isAsCheapAsAMove = 1, isCodeGenOnly = 1 in
+ def TEE_#vt : I<(outs vt:$res, vt:$also), (ins vt:$src), [],
+ "tee_local\t$res, $also, $src">;
+
+ // This is the actual get_local instruction in wasm. These are made explicit
+ // by the ExplicitLocals pass. It has mayLoad because it reads from a wasm
+ // local, which is a side effect not otherwise modeled in LLVM.
+ let mayLoad = 1, isAsCheapAsAMove = 1 in
+ def GET_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local), [],
+ "get_local\t$res, $local", 0x20>;
+
+ // This is the actual set_local instruction in wasm. These are made explicit
+ // by the ExplicitLocals pass. It has mayStore because it writes to a wasm
+ // local, which is a side effect not otherwise modeled in LLVM.
+ let mayStore = 1, isAsCheapAsAMove = 1 in
+ def SET_LOCAL_#vt : I<(outs), (ins local_op:$local, vt:$src), [],
+ "set_local\t$local, $src", 0x21>;
+
+ // This is the actual tee_local instruction in wasm. TEEs are turned into
+ // TEE_LOCALs by the ExplicitLocals pass. It has mayStore for the same reason
+ // as SET_LOCAL.
+ let mayStore = 1, isAsCheapAsAMove = 1 in
+ def TEE_LOCAL_#vt : I<(outs vt:$res), (ins local_op:$local, vt:$src), [],
+ "tee_local\t$res, $local, $src", 0x22>;
+
+ // Unused values must be dropped in some contexts.
+ def DROP_#vt : I<(outs), (ins vt:$src), [],
+ "drop\t$src", 0x1a>;
+
+ let mayLoad = 1 in
+ def GET_GLOBAL_#vt : I<(outs vt:$res), (ins global_op:$local), [],
+ "get_global\t$res, $local", 0x23>;
+
+ let mayStore = 1 in
+ def SET_GLOBAL_#vt : I<(outs), (ins global_op:$local, vt:$src), [],
+ "set_global\t$local, $src", 0x24>;
+
+} // hasSideEffects = 0
+}
+defm : LOCAL<I32>;
+defm : LOCAL<I64>;
+defm : LOCAL<F32>;
+defm : LOCAL<F64>;
+defm : LOCAL<V128>, Requires<[HasSIMD128]>;
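+
+// Each LOCAL expansion above defines COPY_*, TEE_*, GET_LOCAL_*, SET_LOCAL_*,
+// TEE_LOCAL_*, DROP_*, GET_GLOBAL_*, and SET_GLOBAL_* for its type; for
+// example, defm : LOCAL<I32> yields GET_LOCAL_I32 with asm string
+// "get_local" and opcode 0x20.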
+
+let isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1 in {
+def CONST_I32 : I<(outs I32:$res), (ins i32imm_op:$imm),
+ [(set I32:$res, imm:$imm)],
+ "i32.const\t$res, $imm", 0x41>;
+def CONST_I64 : I<(outs I64:$res), (ins i64imm_op:$imm),
+ [(set I64:$res, imm:$imm)],
+ "i64.const\t$res, $imm", 0x42>;
+def CONST_F32 : I<(outs F32:$res), (ins f32imm_op:$imm),
+ [(set F32:$res, fpimm:$imm)],
+ "f32.const\t$res, $imm", 0x43>;
+def CONST_F64 : I<(outs F64:$res), (ins f64imm_op:$imm),
+ [(set F64:$res, fpimm:$imm)],
+ "f64.const\t$res, $imm", 0x44>;
+} // isMoveImm = 1, isAsCheapAsAMove = 1, isReMaterializable = 1
+
+} // Defs = [ARGUMENTS]
+
+def : Pat<(i32 (WebAssemblywrapper tglobaladdr:$addr)),
+ (CONST_I32 tglobaladdr:$addr)>;
+def : Pat<(i32 (WebAssemblywrapper texternalsym:$addr)),
+ (CONST_I32 texternalsym:$addr)>;
+
+//===----------------------------------------------------------------------===//
+// Additional sets of instructions.
+//===----------------------------------------------------------------------===//
+
+include "WebAssemblyInstrMemory.td"
+include "WebAssemblyInstrCall.td"
+include "WebAssemblyInstrControl.td"
+include "WebAssemblyInstrInteger.td"
+include "WebAssemblyInstrConv.td"
+include "WebAssemblyInstrFloat.td"
+include "WebAssemblyInstrAtomics.td"
+include "WebAssemblyInstrSIMD.td"
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
new file mode 100644
index 000000000000..e872dc219846
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrInteger.td
@@ -0,0 +1,97 @@
+// WebAssemblyInstrInteger.td-WebAssembly Integer codegen -------*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly Integer operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let Defs = [ARGUMENTS] in {
+
+// The spaces after the names are for aesthetic purposes only, to make
+// operands line up vertically after tab expansion.
+let isCommutable = 1 in
+defm ADD : BinaryInt<add, "add ", 0x6a, 0x7c>;
+defm SUB : BinaryInt<sub, "sub ", 0x6b, 0x7d>;
+let isCommutable = 1 in
+defm MUL : BinaryInt<mul, "mul ", 0x6c, 0x7e>;
+// Divide and remainder trap on a zero denominator.
+let hasSideEffects = 1 in {
+defm DIV_S : BinaryInt<sdiv, "div_s", 0x6d, 0x7f>;
+defm DIV_U : BinaryInt<udiv, "div_u", 0x6e, 0x80>;
+defm REM_S : BinaryInt<srem, "rem_s", 0x6f, 0x81>;
+defm REM_U : BinaryInt<urem, "rem_u", 0x70, 0x82>;
+} // hasSideEffects = 1
+let isCommutable = 1 in {
+defm AND : BinaryInt<and, "and ", 0x71, 0x83>;
+defm OR : BinaryInt<or, "or ", 0x72, 0x84>;
+defm XOR : BinaryInt<xor, "xor ", 0x73, 0x85>;
+} // isCommutable = 1
+defm SHL : BinaryInt<shl, "shl ", 0x74, 0x86>;
+defm SHR_S : BinaryInt<sra, "shr_s", 0x75, 0x87>;
+defm SHR_U : BinaryInt<srl, "shr_u", 0x76, 0x88>;
+defm ROTL : BinaryInt<rotl, "rotl", 0x77, 0x89>;
+defm ROTR : BinaryInt<rotr, "rotr", 0x78, 0x8a>;
+
+let isCommutable = 1 in {
+defm EQ : ComparisonInt<SETEQ, "eq ", 0x46, 0x51>;
+defm NE : ComparisonInt<SETNE, "ne ", 0x47, 0x52>;
+} // isCommutable = 1
+defm LT_S : ComparisonInt<SETLT, "lt_s", 0x48, 0x53>;
+defm LT_U : ComparisonInt<SETULT, "lt_u", 0x49, 0x54>;
+defm GT_S : ComparisonInt<SETGT, "gt_s", 0x4a, 0x55>;
+defm GT_U : ComparisonInt<SETUGT, "gt_u", 0x4b, 0x56>;
+defm LE_S : ComparisonInt<SETLE, "le_s", 0x4c, 0x57>;
+defm LE_U : ComparisonInt<SETULE, "le_u", 0x4d, 0x58>;
+defm GE_S : ComparisonInt<SETGE, "ge_s", 0x4e, 0x59>;
+defm GE_U : ComparisonInt<SETUGE, "ge_u", 0x4f, 0x5a>;
+
+defm CLZ : UnaryInt<ctlz, "clz ", 0x67, 0x79>;
+defm CTZ : UnaryInt<cttz, "ctz ", 0x68, 0x7a>;
+defm POPCNT : UnaryInt<ctpop, "popcnt", 0x69, 0x7b>;
+
+def EQZ_I32 : I<(outs I32:$dst), (ins I32:$src),
+ [(set I32:$dst, (setcc I32:$src, 0, SETEQ))],
+ "i32.eqz \t$dst, $src", 0x45>;
+def EQZ_I64 : I<(outs I32:$dst), (ins I64:$src),
+ [(set I32:$dst, (setcc I64:$src, 0, SETEQ))],
+ "i64.eqz \t$dst, $src", 0x50>;
+
+} // Defs = [ARGUMENTS]
+
+// Optimize away an explicit mask on a rotate count.
+def : Pat<(rotl I32:$lhs, (and I32:$rhs, 31)), (ROTL_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(rotr I32:$lhs, (and I32:$rhs, 31)), (ROTR_I32 I32:$lhs, I32:$rhs)>;
+def : Pat<(rotl I64:$lhs, (and I64:$rhs, 63)), (ROTL_I64 I64:$lhs, I64:$rhs)>;
+def : Pat<(rotr I64:$lhs, (and I64:$rhs, 63)), (ROTR_I64 I64:$lhs, I64:$rhs)>;
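+
+// This is safe because WebAssembly shift and rotate instructions interpret
+// the count operand modulo the operand's bit width, so the explicit mask in
+// rotate idioms like "rotl(x, n & 31)" is redundant.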
+
+let Defs = [ARGUMENTS] in {
+
+def SELECT_I32 : I<(outs I32:$dst), (ins I32:$lhs, I32:$rhs, I32:$cond),
+ [(set I32:$dst, (select I32:$cond, I32:$lhs, I32:$rhs))],
+ "i32.select\t$dst, $lhs, $rhs, $cond", 0x1b>;
+def SELECT_I64 : I<(outs I64:$dst), (ins I64:$lhs, I64:$rhs, I32:$cond),
+ [(set I64:$dst, (select I32:$cond, I64:$lhs, I64:$rhs))],
+ "i64.select\t$dst, $lhs, $rhs, $cond", 0x1b>;
+
+} // Defs = [ARGUMENTS]
+
+// ISD::SELECT requires its operand to conform to getBooleanContents, but
+// WebAssembly's select interprets any non-zero value as true, so we can fold
+// a setne with 0 into a select.
+def : Pat<(select (i32 (setne I32:$cond, 0)), I32:$lhs, I32:$rhs),
+ (SELECT_I32 I32:$lhs, I32:$rhs, I32:$cond)>;
+def : Pat<(select (i32 (setne I32:$cond, 0)), I64:$lhs, I64:$rhs),
+ (SELECT_I64 I64:$lhs, I64:$rhs, I32:$cond)>;
+
+// And again, this time with seteq instead of setne and the arms reversed.
+def : Pat<(select (i32 (seteq I32:$cond, 0)), I32:$lhs, I32:$rhs),
+ (SELECT_I32 I32:$rhs, I32:$lhs, I32:$cond)>;
+def : Pat<(select (i32 (seteq I32:$cond, 0)), I64:$lhs, I64:$rhs),
+ (SELECT_I64 I64:$rhs, I64:$lhs, I32:$cond)>;
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
new file mode 100644
index 000000000000..9d58895ca5a6
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrMemory.td
@@ -0,0 +1,542 @@
+// WebAssemblyInstrMemory.td-WebAssembly Memory codegen support -*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly Memory operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+// TODO:
+// - HasAddr64
+// - WebAssemblyTargetLowering work having to do with atomics
+// - Each memory access has an optional alignment.
+
+// WebAssembly has i8/i16/i32/i64/f32/f64 memory types, but doesn't have i8/i16
+// local types. These memory-only types instead zero- or sign-extend into local
+// types when loading, and truncate when storing.
+
+// WebAssembly constant offsets are performed as unsigned with infinite
+// precision, so we need to check for NoUnsignedWrap so that we don't fold an
+// offset for an add that needs wrapping.
+def regPlusImm : PatFrag<(ops node:$addr, node:$off),
+ (add node:$addr, node:$off),
+ [{ return N->getFlags().hasNoUnsignedWrap(); }]>;
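+
+// For example, with a 32-bit $addr of 0xffffffff and an $off of 16, a
+// wrapping add computes address 15, while folding 16 into the offset field
+// would access 0x10000000f under wasm's infinite-precision effective-address
+// arithmetic; hence only adds known not to wrap ('nuw') may be folded.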
+
+// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
+def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+ return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
+
+ KnownBits Known0;
+ CurDAG->computeKnownBits(N->getOperand(0), Known0, 0);
+ KnownBits Known1;
+ CurDAG->computeKnownBits(N->getOperand(1), Known1, 0);
+ return (~Known0.Zero & ~Known1.Zero) == 0;
+}]>;
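+
+// For example, in (or (shl $x, 4), 3) the low four bits of the left operand
+// are known to be zero, so the 'or' behaves exactly like an 'add' and the 3
+// can be folded as a constant offset.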
+
+// GlobalAddresses are conceptually unsigned values, so we can also fold them
+// into immediate values as long as the add is 'nuw'.
+// TODO: We'd like to also match GA offsets but there are cases where the
+// register can have a negative value. Find out what more we can do.
+def regPlusGA : PatFrag<(ops node:$addr, node:$off),
+ (add node:$addr, node:$off),
+ [{
+ return N->getFlags().hasNoUnsignedWrap();
+}]>;
+
+// We don't need a regPlusES because external symbols never have constant
+// offsets folded into them, so we can just use add.
+
+let Defs = [ARGUMENTS] in {
+
+// Defines atomic and non-atomic loads, regular and extending.
+class WebAssemblyLoad<WebAssemblyRegClass rc, string Name, int Opcode> :
+ I<(outs rc:$dst),
+ (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+ [], !strconcat(Name, "\t$dst, ${off}(${addr})${p2align}"), Opcode>;
+
+// Basic load.
+// FIXME: When we can break syntax compatibility, reorder the fields in the
+// asmstrings to match the binary encoding.
+def LOAD_I32 : WebAssemblyLoad<I32, "i32.load", 0x28>;
+def LOAD_I64 : WebAssemblyLoad<I64, "i64.load", 0x29>;
+def LOAD_F32 : WebAssemblyLoad<F32, "f32.load", 0x2a>;
+def LOAD_F64 : WebAssemblyLoad<F64, "f64.load", 0x2b>;
+
+} // Defs = [ARGUMENTS]
+
+// Select loads with no constant offset.
+class LoadPatNoOffset<ValueType ty, PatFrag node, I inst> :
+ Pat<(ty (node I32:$addr)), (inst 0, 0, $addr)>;
+
+def : LoadPatNoOffset<i32, load, LOAD_I32>;
+def : LoadPatNoOffset<i64, load, LOAD_I64>;
+def : LoadPatNoOffset<f32, load, LOAD_F32>;
+def : LoadPatNoOffset<f64, load, LOAD_F64>;
+
+// Select loads with a constant offset.
+
+// Pattern with address + immediate offset
+class LoadPatImmOff<ValueType ty, PatFrag loadkind, PatFrag operand, I inst> :
+ Pat<(ty (loadkind (operand I32:$addr, imm:$off))),
+ (inst 0, imm:$off, $addr)>;
+
+def : LoadPatImmOff<i32, load, regPlusImm, LOAD_I32>;
+def : LoadPatImmOff<i64, load, regPlusImm, LOAD_I64>;
+def : LoadPatImmOff<f32, load, regPlusImm, LOAD_F32>;
+def : LoadPatImmOff<f64, load, regPlusImm, LOAD_F64>;
+def : LoadPatImmOff<i32, load, or_is_add, LOAD_I32>;
+def : LoadPatImmOff<i64, load, or_is_add, LOAD_I64>;
+def : LoadPatImmOff<f32, load, or_is_add, LOAD_F32>;
+def : LoadPatImmOff<f64, load, or_is_add, LOAD_F64>;
+
+class LoadPatGlobalAddr<ValueType ty, PatFrag loadkind, I inst> :
+ Pat<(ty (loadkind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)))),
+ (inst 0, tglobaladdr:$off, $addr)>;
+
+def : LoadPatGlobalAddr<i32, load, LOAD_I32>;
+def : LoadPatGlobalAddr<i64, load, LOAD_I64>;
+def : LoadPatGlobalAddr<f32, load, LOAD_F32>;
+def : LoadPatGlobalAddr<f64, load, LOAD_F64>;
+
+class LoadPatExternalSym<ValueType ty, PatFrag loadkind, I inst> :
+ Pat<(ty (loadkind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)))),
+ (inst 0, texternalsym:$off, $addr)>;
+def : LoadPatExternalSym<i32, load, LOAD_I32>;
+def : LoadPatExternalSym<i64, load, LOAD_I64>;
+def : LoadPatExternalSym<f32, load, LOAD_F32>;
+def : LoadPatExternalSym<f64, load, LOAD_F64>;
+
+
+// Select loads with just a constant offset.
+class LoadPatOffsetOnly<ValueType ty, PatFrag loadkind, I inst> :
+ Pat<(ty (loadkind imm:$off)), (inst 0, imm:$off, (CONST_I32 0))>;
+
+def : LoadPatOffsetOnly<i32, load, LOAD_I32>;
+def : LoadPatOffsetOnly<i64, load, LOAD_I64>;
+def : LoadPatOffsetOnly<f32, load, LOAD_F32>;
+def : LoadPatOffsetOnly<f64, load, LOAD_F64>;
+
+class LoadPatGlobalAddrOffOnly<ValueType ty, PatFrag loadkind, I inst> :
+ Pat<(ty (loadkind (WebAssemblywrapper tglobaladdr:$off))),
+ (inst 0, tglobaladdr:$off, (CONST_I32 0))>;
+
+def : LoadPatGlobalAddrOffOnly<i32, load, LOAD_I32>;
+def : LoadPatGlobalAddrOffOnly<i64, load, LOAD_I64>;
+def : LoadPatGlobalAddrOffOnly<f32, load, LOAD_F32>;
+def : LoadPatGlobalAddrOffOnly<f64, load, LOAD_F64>;
+
+class LoadPatExternSymOffOnly<ValueType ty, PatFrag loadkind, I inst> :
+ Pat<(ty (loadkind (WebAssemblywrapper texternalsym:$off))),
+ (inst 0, texternalsym:$off, (CONST_I32 0))>;
+def : LoadPatExternSymOffOnly<i32, load, LOAD_I32>;
+def : LoadPatExternSymOffOnly<i64, load, LOAD_I64>;
+def : LoadPatExternSymOffOnly<f32, load, LOAD_F32>;
+def : LoadPatExternSymOffOnly<f64, load, LOAD_F64>;
+
+let Defs = [ARGUMENTS] in {
+
+// Extending load.
+def LOAD8_S_I32 : WebAssemblyLoad<I32, "i32.load8_s", 0x2c>;
+def LOAD8_U_I32 : WebAssemblyLoad<I32, "i32.load8_u", 0x2d>;
+def LOAD16_S_I32 : WebAssemblyLoad<I32, "i32.load16_s", 0x2e>;
+def LOAD16_U_I32 : WebAssemblyLoad<I32, "i32.load16_u", 0x2f>;
+def LOAD8_S_I64 : WebAssemblyLoad<I64, "i64.load8_s", 0x30>;
+def LOAD8_U_I64 : WebAssemblyLoad<I64, "i64.load8_u", 0x31>;
+def LOAD16_S_I64 : WebAssemblyLoad<I64, "i64.load16_s", 0x32>;
+def LOAD16_U_I64 : WebAssemblyLoad<I64, "i64.load16_u", 0x33>;
+def LOAD32_S_I64 : WebAssemblyLoad<I64, "i64.load32_s", 0x34>;
+def LOAD32_U_I64 : WebAssemblyLoad<I64, "i64.load32_u", 0x35>;
+
+} // Defs = [ARGUMENTS]
+
+// Select extending loads with no constant offset.
+def : LoadPatNoOffset<i32, sextloadi8, LOAD8_S_I32>;
+def : LoadPatNoOffset<i32, zextloadi8, LOAD8_U_I32>;
+def : LoadPatNoOffset<i32, sextloadi16, LOAD16_S_I32>;
+def : LoadPatNoOffset<i32, zextloadi16, LOAD16_U_I32>;
+def : LoadPatNoOffset<i64, sextloadi8, LOAD8_S_I64>;
+def : LoadPatNoOffset<i64, zextloadi8, LOAD8_U_I64>;
+def : LoadPatNoOffset<i64, sextloadi16, LOAD16_S_I64>;
+def : LoadPatNoOffset<i64, zextloadi16, LOAD16_U_I64>;
+def : LoadPatNoOffset<i64, sextloadi32, LOAD32_S_I64>;
+def : LoadPatNoOffset<i64, zextloadi32, LOAD32_U_I64>;
+
+// Select extending loads with a constant offset.
+def : LoadPatImmOff<i32, sextloadi8, regPlusImm, LOAD8_S_I32>;
+def : LoadPatImmOff<i32, zextloadi8, regPlusImm, LOAD8_U_I32>;
+def : LoadPatImmOff<i32, sextloadi16, regPlusImm, LOAD16_S_I32>;
+def : LoadPatImmOff<i32, zextloadi16, regPlusImm, LOAD16_U_I32>;
+def : LoadPatImmOff<i64, sextloadi8, regPlusImm, LOAD8_S_I64>;
+def : LoadPatImmOff<i64, zextloadi8, regPlusImm, LOAD8_U_I64>;
+def : LoadPatImmOff<i64, sextloadi16, regPlusImm, LOAD16_S_I64>;
+def : LoadPatImmOff<i64, zextloadi16, regPlusImm, LOAD16_U_I64>;
+def : LoadPatImmOff<i64, sextloadi32, regPlusImm, LOAD32_S_I64>;
+def : LoadPatImmOff<i64, zextloadi32, regPlusImm, LOAD32_U_I64>;
+
+def : LoadPatImmOff<i32, sextloadi8, or_is_add, LOAD8_S_I32>;
+def : LoadPatImmOff<i32, zextloadi8, or_is_add, LOAD8_U_I32>;
+def : LoadPatImmOff<i32, sextloadi16, or_is_add, LOAD16_S_I32>;
+def : LoadPatImmOff<i32, zextloadi16, or_is_add, LOAD16_U_I32>;
+def : LoadPatImmOff<i64, sextloadi8, or_is_add, LOAD8_S_I64>;
+def : LoadPatImmOff<i64, zextloadi8, or_is_add, LOAD8_U_I64>;
+def : LoadPatImmOff<i64, sextloadi16, or_is_add, LOAD16_S_I64>;
+def : LoadPatImmOff<i64, zextloadi16, or_is_add, LOAD16_U_I64>;
+def : LoadPatImmOff<i64, sextloadi32, or_is_add, LOAD32_S_I64>;
+def : LoadPatImmOff<i64, zextloadi32, or_is_add, LOAD32_U_I64>;
+
+def : LoadPatGlobalAddr<i32, sextloadi8, LOAD8_S_I32>;
+def : LoadPatGlobalAddr<i32, zextloadi8, LOAD8_U_I32>;
+def : LoadPatGlobalAddr<i32, sextloadi16, LOAD16_S_I32>;
+def : LoadPatGlobalAddr<i32, zextloadi16, LOAD16_U_I32>;
+
+def : LoadPatGlobalAddr<i64, sextloadi8, LOAD8_S_I64>;
+def : LoadPatGlobalAddr<i64, zextloadi8, LOAD8_U_I64>;
+def : LoadPatGlobalAddr<i64, sextloadi16, LOAD16_S_I64>;
+def : LoadPatGlobalAddr<i64, zextloadi16, LOAD16_U_I64>;
+def : LoadPatGlobalAddr<i64, sextloadi32, LOAD32_S_I64>;
+def : LoadPatGlobalAddr<i64, zextloadi32, LOAD32_U_I64>;
+
+def : LoadPatExternalSym<i32, sextloadi8, LOAD8_S_I32>;
+def : LoadPatExternalSym<i32, zextloadi8, LOAD8_U_I32>;
+def : LoadPatExternalSym<i32, sextloadi16, LOAD16_S_I32>;
+def : LoadPatExternalSym<i32, zextloadi16, LOAD16_U_I32>;
+def : LoadPatExternalSym<i64, sextloadi8, LOAD8_S_I64>;
+def : LoadPatExternalSym<i64, zextloadi8, LOAD8_U_I64>;
+def : LoadPatExternalSym<i64, sextloadi16, LOAD16_S_I64>;
+def : LoadPatExternalSym<i64, zextloadi16, LOAD16_U_I64>;
+def : LoadPatExternalSym<i64, sextloadi32, LOAD32_S_I64>;
+def : LoadPatExternalSym<i64, zextloadi32, LOAD32_U_I64>;
+
+// Select extending loads with just a constant offset.
+def : LoadPatOffsetOnly<i32, sextloadi8, LOAD8_S_I32>;
+def : LoadPatOffsetOnly<i32, zextloadi8, LOAD8_U_I32>;
+def : LoadPatOffsetOnly<i32, sextloadi16, LOAD16_S_I32>;
+def : LoadPatOffsetOnly<i32, zextloadi16, LOAD16_U_I32>;
+
+def : LoadPatOffsetOnly<i64, sextloadi8, LOAD8_S_I64>;
+def : LoadPatOffsetOnly<i64, zextloadi8, LOAD8_U_I64>;
+def : LoadPatOffsetOnly<i64, sextloadi16, LOAD16_S_I64>;
+def : LoadPatOffsetOnly<i64, zextloadi16, LOAD16_U_I64>;
+def : LoadPatOffsetOnly<i64, sextloadi32, LOAD32_S_I64>;
+def : LoadPatOffsetOnly<i64, zextloadi32, LOAD32_U_I64>;
+
+def : LoadPatGlobalAddrOffOnly<i32, sextloadi8, LOAD8_S_I32>;
+def : LoadPatGlobalAddrOffOnly<i32, zextloadi8, LOAD8_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i32, sextloadi16, LOAD16_S_I32>;
+def : LoadPatGlobalAddrOffOnly<i32, zextloadi16, LOAD16_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i64, sextloadi8, LOAD8_S_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, zextloadi8, LOAD8_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, sextloadi16, LOAD16_S_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, zextloadi16, LOAD16_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, sextloadi32, LOAD32_S_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, zextloadi32, LOAD32_U_I64>;
+
+def : LoadPatExternSymOffOnly<i32, sextloadi8, LOAD8_S_I32>;
+def : LoadPatExternSymOffOnly<i32, zextloadi8, LOAD8_U_I32>;
+def : LoadPatExternSymOffOnly<i32, sextloadi16, LOAD16_S_I32>;
+def : LoadPatExternSymOffOnly<i32, zextloadi16, LOAD16_U_I32>;
+def : LoadPatExternSymOffOnly<i64, sextloadi8, LOAD8_S_I64>;
+def : LoadPatExternSymOffOnly<i64, zextloadi8, LOAD8_U_I64>;
+def : LoadPatExternSymOffOnly<i64, sextloadi16, LOAD16_S_I64>;
+def : LoadPatExternSymOffOnly<i64, zextloadi16, LOAD16_U_I64>;
+def : LoadPatExternSymOffOnly<i64, sextloadi32, LOAD32_S_I64>;
+def : LoadPatExternSymOffOnly<i64, zextloadi32, LOAD32_U_I64>;
+
+// Resolve "don't care" extending loads to zero-extending loads. This is
+// somewhat arbitrary, but zero-extending is conceptually simpler.
+
+// Select "don't care" extending loads with no constant offset.
+def : LoadPatNoOffset<i32, extloadi8, LOAD8_U_I32>;
+def : LoadPatNoOffset<i32, extloadi16, LOAD16_U_I32>;
+def : LoadPatNoOffset<i64, extloadi8, LOAD8_U_I64>;
+def : LoadPatNoOffset<i64, extloadi16, LOAD16_U_I64>;
+def : LoadPatNoOffset<i64, extloadi32, LOAD32_U_I64>;
+
+// Select "don't care" extending loads with a constant offset.
+def : LoadPatImmOff<i32, extloadi8, regPlusImm, LOAD8_U_I32>;
+def : LoadPatImmOff<i32, extloadi16, regPlusImm, LOAD16_U_I32>;
+def : LoadPatImmOff<i64, extloadi8, regPlusImm, LOAD8_U_I64>;
+def : LoadPatImmOff<i64, extloadi16, regPlusImm, LOAD16_U_I64>;
+def : LoadPatImmOff<i64, extloadi32, regPlusImm, LOAD32_U_I64>;
+def : LoadPatImmOff<i32, extloadi8, or_is_add, LOAD8_U_I32>;
+def : LoadPatImmOff<i32, extloadi16, or_is_add, LOAD16_U_I32>;
+def : LoadPatImmOff<i64, extloadi8, or_is_add, LOAD8_U_I64>;
+def : LoadPatImmOff<i64, extloadi16, or_is_add, LOAD16_U_I64>;
+def : LoadPatImmOff<i64, extloadi32, or_is_add, LOAD32_U_I64>;
+def : LoadPatGlobalAddr<i32, extloadi8, LOAD8_U_I32>;
+def : LoadPatGlobalAddr<i32, extloadi16, LOAD16_U_I32>;
+def : LoadPatGlobalAddr<i64, extloadi8, LOAD8_U_I64>;
+def : LoadPatGlobalAddr<i64, extloadi16, LOAD16_U_I64>;
+def : LoadPatGlobalAddr<i64, extloadi32, LOAD32_U_I64>;
+def : LoadPatExternalSym<i32, extloadi8, LOAD8_U_I32>;
+def : LoadPatExternalSym<i32, extloadi16, LOAD16_U_I32>;
+def : LoadPatExternalSym<i64, extloadi8, LOAD8_U_I64>;
+def : LoadPatExternalSym<i64, extloadi16, LOAD16_U_I64>;
+def : LoadPatExternalSym<i64, extloadi32, LOAD32_U_I64>;
+
+// Select "don't care" extending loads with just a constant offset.
+def : LoadPatOffsetOnly<i32, extloadi8, LOAD8_U_I32>;
+def : LoadPatOffsetOnly<i32, extloadi16, LOAD16_U_I32>;
+def : LoadPatOffsetOnly<i64, extloadi8, LOAD8_U_I64>;
+def : LoadPatOffsetOnly<i64, extloadi16, LOAD16_U_I64>;
+def : LoadPatOffsetOnly<i64, extloadi32, LOAD32_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i32, extloadi8, LOAD8_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i32, extloadi16, LOAD16_U_I32>;
+def : LoadPatGlobalAddrOffOnly<i64, extloadi8, LOAD8_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, extloadi16, LOAD16_U_I64>;
+def : LoadPatGlobalAddrOffOnly<i64, extloadi32, LOAD32_U_I64>;
+def : LoadPatExternSymOffOnly<i32, extloadi8, LOAD8_U_I32>;
+def : LoadPatExternSymOffOnly<i32, extloadi16, LOAD16_U_I32>;
+def : LoadPatExternSymOffOnly<i64, extloadi8, LOAD8_U_I64>;
+def : LoadPatExternSymOffOnly<i64, extloadi16, LOAD16_U_I64>;
+def : LoadPatExternSymOffOnly<i64, extloadi32, LOAD32_U_I64>;
+
+let Defs = [ARGUMENTS] in {
+
+// Basic store.
+// Note: WebAssembly inverts SelectionDAG's usual operand order.
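+// (That is, ISD::STORE is (store $val, $addr), while the instructions below
+// take their address operands first and the value last.)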
+def STORE_I32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I32:$val), [],
+ "i32.store\t${off}(${addr})${p2align}, $val", 0x36>;
+def STORE_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I64:$val), [],
+ "i64.store\t${off}(${addr})${p2align}, $val", 0x37>;
+def STORE_F32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ F32:$val), [],
+ "f32.store\t${off}(${addr})${p2align}, $val", 0x38>;
+def STORE_F64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ F64:$val), [],
+ "f64.store\t${off}(${addr})${p2align}, $val", 0x39>;
+
+} // Defs = [ARGUMENTS]
+
+// Select stores with no constant offset.
+def : Pat<(store I32:$val, I32:$addr), (STORE_I32 0, 0, I32:$addr, I32:$val)>;
+def : Pat<(store I64:$val, I32:$addr), (STORE_I64 0, 0, I32:$addr, I64:$val)>;
+def : Pat<(store F32:$val, I32:$addr), (STORE_F32 0, 0, I32:$addr, F32:$val)>;
+def : Pat<(store F64:$val, I32:$addr), (STORE_F64 0, 0, I32:$addr, F64:$val)>;
+
+// Select stores with a constant offset.
+def : Pat<(store I32:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE_I32 0, imm:$off, I32:$addr, I32:$val)>;
+def : Pat<(store I64:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(store F32:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE_F32 0, imm:$off, I32:$addr, F32:$val)>;
+def : Pat<(store F64:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE_F64 0, imm:$off, I32:$addr, F64:$val)>;
+def : Pat<(store I32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_I32 0, imm:$off, I32:$addr, I32:$val)>;
+def : Pat<(store I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(store F32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_F32 0, imm:$off, I32:$addr, F32:$val)>;
+def : Pat<(store F64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE_F64 0, imm:$off, I32:$addr, F64:$val)>;
+def : Pat<(store I32:$val, (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE_I32 0, tglobaladdr:$off, I32:$addr, I32:$val)>;
+def : Pat<(store I64:$val, (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>;
+def : Pat<(store F32:$val, (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE_F32 0, tglobaladdr:$off, I32:$addr, F32:$val)>;
+def : Pat<(store F64:$val, (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE_F64 0, tglobaladdr:$off, I32:$addr, F64:$val)>;
+def : Pat<(store I32:$val, (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE_I32 0, texternalsym:$off, I32:$addr, I32:$val)>;
+def : Pat<(store I64:$val, (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE_I64 0, texternalsym:$off, I32:$addr, I64:$val)>;
+def : Pat<(store F32:$val, (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE_F32 0, texternalsym:$off, I32:$addr, F32:$val)>;
+def : Pat<(store F64:$val, (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE_F64 0, texternalsym:$off, I32:$addr, F64:$val)>;
+
+// Select stores with just a constant offset.
+def : Pat<(store I32:$val, imm:$off),
+ (STORE_I32 0, imm:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(store I64:$val, imm:$off),
+ (STORE_I64 0, imm:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(store F32:$val, imm:$off),
+ (STORE_F32 0, imm:$off, (CONST_I32 0), F32:$val)>;
+def : Pat<(store F64:$val, imm:$off),
+ (STORE_F64 0, imm:$off, (CONST_I32 0), F64:$val)>;
+def : Pat<(store I32:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE_I32 0, tglobaladdr:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(store I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(store F32:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE_F32 0, tglobaladdr:$off, (CONST_I32 0), F32:$val)>;
+def : Pat<(store F64:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE_F64 0, tglobaladdr:$off, (CONST_I32 0), F64:$val)>;
+def : Pat<(store I32:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE_I32 0, texternalsym:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(store I64:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(store F32:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE_F32 0, texternalsym:$off, (CONST_I32 0), F32:$val)>;
+def : Pat<(store F64:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE_F64 0, texternalsym:$off, (CONST_I32 0), F64:$val)>;
+
+let Defs = [ARGUMENTS] in {
+
+// Truncating store.
+def STORE8_I32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I32:$val), [],
+ "i32.store8\t${off}(${addr})${p2align}, $val", 0x3a>;
+def STORE16_I32 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I32:$val), [],
+ "i32.store16\t${off}(${addr})${p2align}, $val", 0x3b>;
+def STORE8_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I64:$val), [],
+ "i64.store8\t${off}(${addr})${p2align}, $val", 0x3c>;
+def STORE16_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I64:$val), [],
+ "i64.store16\t${off}(${addr})${p2align}, $val", 0x3d>;
+def STORE32_I64 : I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr,
+ I64:$val), [],
+ "i64.store32\t${off}(${addr})${p2align}, $val", 0x3e>;
+
+} // Defs = [ARGUMENTS]
+
+// Select truncating stores with no constant offset.
+def : Pat<(truncstorei8 I32:$val, I32:$addr),
+ (STORE8_I32 0, 0, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, I32:$addr),
+ (STORE16_I32 0, 0, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, I32:$addr),
+ (STORE8_I64 0, 0, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, I32:$addr),
+ (STORE16_I64 0, 0, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, I32:$addr),
+ (STORE32_I64 0, 0, I32:$addr, I64:$val)>;
+
+// Select truncating stores with a constant offset.
+def : Pat<(truncstorei8 I32:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE8_I32 0, imm:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE16_I32 0, imm:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE8_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE16_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, (regPlusImm I32:$addr, imm:$off)),
+ (STORE32_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei8 I32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE8_I32 0, imm:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE16_I32 0, imm:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE8_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE16_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, (or_is_add I32:$addr, imm:$off)),
+ (STORE32_I64 0, imm:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei8 I32:$val,
+ (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE8_I32 0, tglobaladdr:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei16 I32:$val,
+ (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE16_I32 0, tglobaladdr:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei8 I64:$val,
+ (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE8_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei16 I64:$val,
+ (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE16_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei32 I64:$val,
+ (regPlusGA I32:$addr,
+ (WebAssemblywrapper tglobaladdr:$off))),
+ (STORE32_I64 0, tglobaladdr:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei8 I32:$val, (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE8_I32 0, texternalsym:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei16 I32:$val,
+ (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE16_I32 0, texternalsym:$off, I32:$addr, I32:$val)>;
+def : Pat<(truncstorei8 I64:$val,
+ (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE8_I64 0, texternalsym:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei16 I64:$val,
+ (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE16_I64 0, texternalsym:$off, I32:$addr, I64:$val)>;
+def : Pat<(truncstorei32 I64:$val,
+ (add I32:$addr,
+ (WebAssemblywrapper texternalsym:$off))),
+ (STORE32_I64 0, texternalsym:$off, I32:$addr, I64:$val)>;
+
+// Select truncating stores with just a constant offset.
+def : Pat<(truncstorei8 I32:$val, imm:$off),
+ (STORE8_I32 0, imm:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, imm:$off),
+ (STORE16_I32 0, imm:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, imm:$off),
+ (STORE8_I64 0, imm:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, imm:$off),
+ (STORE16_I64 0, imm:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, imm:$off),
+ (STORE32_I64 0, imm:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei8 I32:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE8_I32 0, tglobaladdr:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE16_I32 0, tglobaladdr:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE8_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE16_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, (WebAssemblywrapper tglobaladdr:$off)),
+ (STORE32_I64 0, tglobaladdr:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei8 I32:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE8_I32 0, texternalsym:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(truncstorei16 I32:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE16_I32 0, texternalsym:$off, (CONST_I32 0), I32:$val)>;
+def : Pat<(truncstorei8 I64:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE8_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei16 I64:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE16_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>;
+def : Pat<(truncstorei32 I64:$val, (WebAssemblywrapper texternalsym:$off)),
+ (STORE32_I64 0, texternalsym:$off, (CONST_I32 0), I64:$val)>;
+
+let Defs = [ARGUMENTS] in {
+
+// Current memory size.
+def CURRENT_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags),
+ [],
+ "current_memory\t$dst", 0x3f>,
+ Requires<[HasAddr32]>;
+
+// Grow memory.
+def GROW_MEMORY_I32 : I<(outs I32:$dst), (ins i32imm:$flags, I32:$delta),
+ [],
+ "grow_memory\t$dst, $delta", 0x40>,
+ Requires<[HasAddr32]>;
+
+} // Defs = [ARGUMENTS]
+
+def : Pat<(int_wasm_current_memory),
+ (CURRENT_MEMORY_I32 0)>;
+def : Pat<(int_wasm_grow_memory I32:$delta),
+ (GROW_MEMORY_I32 0, $delta)>;
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
new file mode 100644
index 000000000000..e403534d580a
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -0,0 +1,19 @@
+// WebAssemblyInstrSIMD.td - WebAssembly SIMD codegen support -*- tablegen -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief WebAssembly SIMD operand code-gen constructs.
+///
+//===----------------------------------------------------------------------===//
+
+let isCommutable = 1 in {
+defm ADD : SIMDBinary<add, fadd, "add ">;
+defm MUL : SIMDBinary<mul, fmul, "mul ">;
+} // isCommutable = 1
+defm SUB : SIMDBinary<sub, fsub, "sub ">;
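+// SIMDBinary (assumed here to be defined in WebAssemblyInstrFormats.td)
+// expands to one instruction per SIMD value type, using the first node
+// (e.g. add) for integer lanes and the second (e.g. fadd) for float lanes.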
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
new file mode 100644
index 000000000000..5b867aa763a1
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
@@ -0,0 +1,135 @@
+//===-- WebAssemblyLowerBrUnless.cpp - Lower br_unless --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file lowers br_unless into br_if with an inverted condition.
+///
+/// br_unless is not currently in the spec, but it's very convenient for LLVM
+/// to use. This pass allows LLVM to use it, for now.
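+///
+/// For example (a sketch of the transformation, not verbatim output), when
+/// the condition cannot be inverted in place,
+///   br_unless $dst, $cond
+/// becomes
+///   $inv = eqz $cond
+///   br_if $dst, $inv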
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower-br_unless"
+
+namespace {
+class WebAssemblyLowerBrUnless final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Lower br_unless";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyLowerBrUnless() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyLowerBrUnless::ID = 0;
+FunctionPass *llvm::createWebAssemblyLowerBrUnless() {
+ return new WebAssemblyLowerBrUnless();
+}
+
+bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Lowering br_unless **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ auto &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ auto &MRI = MF.getRegInfo();
+
+ for (auto &MBB : MF) {
+ for (auto MII = MBB.begin(); MII != MBB.end();) {
+ MachineInstr *MI = &*MII++;
+ if (MI->getOpcode() != WebAssembly::BR_UNLESS)
+ continue;
+
+ unsigned Cond = MI->getOperand(1).getReg();
+ bool Inverted = false;
+
+ // Attempt to invert the condition in place.
+ if (MFI.isVRegStackified(Cond)) {
+ assert(MRI.hasOneDef(Cond));
+ MachineInstr *Def = MRI.getVRegDef(Cond);
+ switch (Def->getOpcode()) {
+ using namespace WebAssembly;
+ case EQ_I32: Def->setDesc(TII.get(NE_I32)); Inverted = true; break;
+ case NE_I32: Def->setDesc(TII.get(EQ_I32)); Inverted = true; break;
+ case GT_S_I32: Def->setDesc(TII.get(LE_S_I32)); Inverted = true; break;
+ case GE_S_I32: Def->setDesc(TII.get(LT_S_I32)); Inverted = true; break;
+ case LT_S_I32: Def->setDesc(TII.get(GE_S_I32)); Inverted = true; break;
+ case LE_S_I32: Def->setDesc(TII.get(GT_S_I32)); Inverted = true; break;
+ case GT_U_I32: Def->setDesc(TII.get(LE_U_I32)); Inverted = true; break;
+ case GE_U_I32: Def->setDesc(TII.get(LT_U_I32)); Inverted = true; break;
+ case LT_U_I32: Def->setDesc(TII.get(GE_U_I32)); Inverted = true; break;
+ case LE_U_I32: Def->setDesc(TII.get(GT_U_I32)); Inverted = true; break;
+ case EQ_I64: Def->setDesc(TII.get(NE_I64)); Inverted = true; break;
+ case NE_I64: Def->setDesc(TII.get(EQ_I64)); Inverted = true; break;
+ case GT_S_I64: Def->setDesc(TII.get(LE_S_I64)); Inverted = true; break;
+ case GE_S_I64: Def->setDesc(TII.get(LT_S_I64)); Inverted = true; break;
+ case LT_S_I64: Def->setDesc(TII.get(GE_S_I64)); Inverted = true; break;
+ case LE_S_I64: Def->setDesc(TII.get(GT_S_I64)); Inverted = true; break;
+ case GT_U_I64: Def->setDesc(TII.get(LE_U_I64)); Inverted = true; break;
+ case GE_U_I64: Def->setDesc(TII.get(LT_U_I64)); Inverted = true; break;
+ case LT_U_I64: Def->setDesc(TII.get(GE_U_I64)); Inverted = true; break;
+ case LE_U_I64: Def->setDesc(TII.get(GT_U_I64)); Inverted = true; break;
+ case EQ_F32: Def->setDesc(TII.get(NE_F32)); Inverted = true; break;
+ case NE_F32: Def->setDesc(TII.get(EQ_F32)); Inverted = true; break;
+ case EQ_F64: Def->setDesc(TII.get(NE_F64)); Inverted = true; break;
+ case NE_F64: Def->setDesc(TII.get(EQ_F64)); Inverted = true; break;
+ case EQZ_I32: {
+ // Invert an eqz by replacing it with its operand.
+ Cond = Def->getOperand(1).getReg();
+ Def->eraseFromParent();
+ Inverted = true;
+ break;
+ }
+ default: break;
+ }
+ }
+
+      // If we weren't able to invert the condition in place, insert an
+      // instruction to invert it.
+ if (!Inverted) {
+ unsigned Tmp = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
+ BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::EQZ_I32), Tmp)
+ .addReg(Cond);
+ MFI.stackifyVReg(Tmp);
+ Cond = Tmp;
+ Inverted = true;
+ }
+
+ // The br_unless condition has now been inverted. Insert a br_if and
+ // delete the br_unless.
+ assert(Inverted);
+ BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::BR_IF))
+ .add(MI->getOperand(0))
+ .addReg(Cond);
+ MBB.erase(MI);
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
new file mode 100644
index 000000000000..f0b6a3e35dba
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -0,0 +1,1179 @@
+//=== WebAssemblyLowerEmscriptenEHSjLj.cpp - Lower exceptions for Emscripten =//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file lowers exception-related instructions and setjmp/longjmp
+/// function calls in order to use Emscripten's JavaScript try and catch
+/// mechanism.
+///
+/// To handle exceptions and setjmp/longjmps, this scheme relies on JavaScript's
+/// try and catch syntax and relevant exception-related libraries implemented
+/// in JavaScript glue code that will be produced by Emscripten. This is similar
+/// to the current Emscripten asm.js exception handling in fastcomp. For
+/// fastcomp's EH / SjLj scheme, see these files in fastcomp LLVM branch:
+/// (Location: https://github.com/kripken/emscripten-fastcomp)
+/// lib/Target/JSBackend/NaCl/LowerEmExceptionsPass.cpp
+/// lib/Target/JSBackend/NaCl/LowerEmSetjmp.cpp
+/// lib/Target/JSBackend/JSBackend.cpp
+/// lib/Target/JSBackend/CallHandlers.h
+///
+/// * Exception handling
+/// This pass lowers invokes and landingpads into library functions in JS glue
+/// code. Invokes are lowered into function wrappers called invoke wrappers
+/// that exist on the JS side, which wrap the original function call with JS
+/// try-catch. If an exception occurs, the cxa_throw() function on the JS side
+/// sets some variables (see below) so we can check whether an exception
+/// occurred from wasm code and handle it appropriately.
+///
+/// * Setjmp-longjmp handling
+/// This pass lowers setjmp to a reasonably-performant approach for emscripten.
+/// The idea is that each block with a setjmp is broken up into two parts: the
+/// part containing setjmp and the part right after the setjmp. The latter part
+/// is either reached from the setjmp, or later from a longjmp. To handle the
+/// longjmp, all calls that might longjmp are also called using invoke wrappers
+/// and thus JS / try-catch. JS longjmp() function also sets some variables so
+/// we can check whether a longjmp occurred from wasm code. Each block with a
+/// function call that might longjmp is also split up after the longjmp call.
+/// After the longjmp call, we check whether a longjmp occurred, and if it did,
+/// which setjmp it corresponds to, and jump to the right post-setjmp block.
+/// We assume setjmp-longjmp handling always runs after EH handling, which
+/// means we don't expect any exception-related instructions when SjLj runs.
+/// FIXME Currently this scheme does not support indirect calls to setjmp,
+/// because of a limitation of the scheme itself. fastcomp does not support it
+/// either.
+///
+/// In detail, this pass does the following things:
+///
+/// 1) Create three global variables: __THREW__, __threwValue, and __tempRet0.
+/// __tempRet0 will be set within __cxa_find_matching_catch() function in
+/// JS library, and __THREW__ and __threwValue will be set in invoke wrappers
+/// in JS glue code. For what invoke wrappers are, refer to 3). These
+/// variables are used for both exceptions and setjmp/longjmps.
+///    __THREW__ indicates whether an exception or a longjmp occurred. 0 means
+///    nothing occurred, 1 means an exception occurred, and other numbers mean
+///    a longjmp occurred. In the case of a longjmp, the __threwValue variable
+///    indicates the setjmp buffer the longjmp corresponds to.
+///    In exception handling, __tempRet0 indicates the type of the exception
+///    caught, and in setjmp/longjmp, it holds the second argument to the
+///    longjmp function.
+///
+/// * Exception handling
+///
+/// 2) Create setThrew and setTempRet0 functions.
+/// The global variables created in 1) will exist in wasm address space,
+/// but their values should be set in JS code, so we provide these functions
+/// as interfaces to JS glue code. These functions are equivalent to the
+/// following JS functions, which actually exist in asm.js version of JS
+/// library.
+///
+/// function setThrew(threw, value) {
+/// if (__THREW__ == 0) {
+/// __THREW__ = threw;
+/// __threwValue = value;
+/// }
+/// }
+///
+/// function setTempRet0(value) {
+/// __tempRet0 = value;
+/// }
+///
+/// 3) Lower
+/// invoke @func(arg1, arg2) to label %invoke.cont unwind label %lpad
+/// into
+/// __THREW__ = 0;
+/// call @__invoke_SIG(func, arg1, arg2)
+/// %__THREW__.val = __THREW__;
+/// __THREW__ = 0;
+/// if (%__THREW__.val == 1)
+/// goto %lpad
+/// else
+/// goto %invoke.cont
+/// SIG is a mangled string generated based on the LLVM IR-level function
+/// signature. After LLVM IR types are lowered to the target wasm types,
+/// the names for these wrappers will change based on wasm types as well,
+/// as in invoke_vi (function takes an int and returns void). The bodies of
+/// these wrappers will be generated in JS glue code, and inside those
+/// wrappers we use JS try-catch to generate actual exception effects. It
+/// also calls the original callee function. An example wrapper in JS code
+/// would look like this:
+/// function invoke_vi(index,a1) {
+/// try {
+/// Module["dynCall_vi"](index,a1); // This calls original callee
+/// } catch(e) {
+/// if (typeof e !== 'number' && e !== 'longjmp') throw e;
+/// asm["setThrew"](1, 0); // setThrew is called here
+/// }
+/// }
+/// If an exception is thrown, __THREW__ will be set to true in a wrapper,
+/// so we can jump to the right BB based on this value.
+///
+/// 4) Lower
+/// %val = landingpad catch c1 catch c2 catch c3 ...
+/// ... use %val ...
+/// into
+/// %fmc = call @__cxa_find_matching_catch_N(c1, c2, c3, ...)
+/// %val = {%fmc, __tempRet0}
+/// ... use %val ...
+/// Here N is a number calculated based on the number of clauses.
+/// Global variable __tempRet0 is set within __cxa_find_matching_catch() in
+/// JS glue code.
+///
+/// 5) Lower
+/// resume {%a, %b}
+/// into
+/// call @__resumeException(%a)
+/// where __resumeException() is a function in JS glue code.
+///
+/// 6) Lower
+/// call @llvm.eh.typeid.for(type) (intrinsic)
+/// into
+/// call @llvm_eh_typeid_for(type)
+/// llvm_eh_typeid_for function will be generated in JS glue code.
+///
+/// * Setjmp / Longjmp handling
+///
+/// 7) In the function entry that calls setjmp, initialize setjmpTable and
+///    setjmpTableSize as follows:
+/// setjmpTableSize = 4;
+/// setjmpTable = (int *) malloc(40);
+/// setjmpTable[0] = 0;
+/// setjmpTable and setjmpTableSize are used in saveSetjmp() function in JS
+/// code.
+///
+/// 8) Lower
+/// setjmp(buf)
+/// into
+/// setjmpTable = saveSetjmp(buf, label, setjmpTable, setjmpTableSize);
+/// setjmpTableSize = __tempRet0;
+/// For each dynamic setjmp call, setjmpTable stores its ID (a number which
+/// is incrementally assigned from 0) and its label (a unique number that
+/// represents each callsite of setjmp). When we need more entries in
+/// setjmpTable, it is reallocated in saveSetjmp() in JS code and it will
+/// return the new table address, and assign the new table size in
+/// __tempRet0. saveSetjmp also stores the setjmp's ID into the buffer buf.
+///    A BB with setjmp is split into two after the setjmp call in order to
+///    make the post-setjmp BB a possible destination of a longjmp.
+///
+/// 9) Lower
+/// longjmp(buf, value)
+/// into
+/// emscripten_longjmp_jmpbuf(buf, value)
+/// emscripten_longjmp_jmpbuf will be lowered to emscripten_longjmp later.
+///
+/// 10) Lower every call that might longjmp into
+/// __THREW__ = 0;
+/// call @__invoke_SIG(func, arg1, arg2)
+/// %__THREW__.val = __THREW__;
+/// __THREW__ = 0;
+/// if (%__THREW__.val != 0 & __threwValue != 0) {
+/// %label = testSetjmp(mem[%__THREW__.val], setjmpTable,
+/// setjmpTableSize);
+/// if (%label == 0)
+/// emscripten_longjmp(%__THREW__.val, __threwValue);
+/// __tempRet0 = __threwValue;
+/// } else {
+/// %label = -1;
+/// }
+/// longjmp_result = __tempRet0;
+/// switch label {
+/// label 1: goto post-setjmp BB 1
+/// label 2: goto post-setjmp BB 2
+/// ...
+///      default: goto split next BB
+/// }
+/// testSetjmp examines setjmpTable to see if there is a matching setjmp
+/// call. After calling an invoke wrapper, if a longjmp occurred, __THREW__
+///    will be the address of the matching jmp_buf buffer and __threwValue
+///    will be the second argument to longjmp. mem[__THREW__.val] is a setjmp
+///    ID that is stored in saveSetjmp. testSetjmp returns a setjmp label, a
+///    unique ID to
+/// each setjmp callsite. Label 0 means this longjmp buffer does not
+/// correspond to one of the setjmp callsites in this function, so in this
+/// case we just chain the longjmp to the caller. (Here we call
+/// emscripten_longjmp, which is different from emscripten_longjmp_jmpbuf.
+/// emscripten_longjmp_jmpbuf takes jmp_buf as its first argument, while
+/// emscripten_longjmp takes an int. Both of them will eventually be lowered
+/// to emscripten_longjmp in s2wasm, but here we need two signatures - we
+/// can't translate an int value to a jmp_buf.)
+/// Label -1 means no longjmp occurred. Otherwise we jump to the right
+/// post-setjmp BB based on the label.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/SSAUpdater.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower-em-ehsjlj"
+
+static cl::list<std::string>
+ EHWhitelist("emscripten-cxx-exceptions-whitelist",
+ cl::desc("The list of function names in which Emscripten-style "
+ "exception handling is enabled (see emscripten "
+ "EMSCRIPTEN_CATCHING_WHITELIST options)"),
+ cl::CommaSeparated);
+
+namespace {
+class WebAssemblyLowerEmscriptenEHSjLj final : public ModulePass {
+ static const char *ThrewGVName;
+ static const char *ThrewValueGVName;
+ static const char *TempRet0GVName;
+ static const char *ResumeFName;
+ static const char *EHTypeIDFName;
+ static const char *SetThrewFName;
+ static const char *SetTempRet0FName;
+ static const char *EmLongjmpFName;
+ static const char *EmLongjmpJmpbufFName;
+ static const char *SaveSetjmpFName;
+ static const char *TestSetjmpFName;
+ static const char *FindMatchingCatchPrefix;
+ static const char *InvokePrefix;
+
+ bool EnableEH; // Enable exception handling
+ bool EnableSjLj; // Enable setjmp/longjmp handling
+
+ GlobalVariable *ThrewGV;
+ GlobalVariable *ThrewValueGV;
+ GlobalVariable *TempRet0GV;
+ Function *ResumeF;
+ Function *EHTypeIDF;
+ Function *EmLongjmpF;
+ Function *EmLongjmpJmpbufF;
+ Function *SaveSetjmpF;
+ Function *TestSetjmpF;
+
+ // __cxa_find_matching_catch_N functions.
+ // Indexed by the number of clauses in an original landingpad instruction.
+ DenseMap<int, Function *> FindMatchingCatches;
+ // Map of <function signature string, invoke_ wrappers>
+ StringMap<Function *> InvokeWrappers;
+ // Set of whitelisted function names for exception handling
+ std::set<std::string> EHWhitelistSet;
+
+ StringRef getPassName() const override {
+ return "WebAssembly Lower Emscripten Exceptions";
+ }
+
+ bool runEHOnFunction(Function &F);
+ bool runSjLjOnFunction(Function &F);
+ Function *getFindMatchingCatch(Module &M, unsigned NumClauses);
+
+ template <typename CallOrInvoke> Value *wrapInvoke(CallOrInvoke *CI);
+ void wrapTestSetjmp(BasicBlock *BB, Instruction *InsertPt, Value *Threw,
+ Value *SetjmpTable, Value *SetjmpTableSize, Value *&Label,
+ Value *&LongjmpResult, BasicBlock *&EndBB);
+ template <typename CallOrInvoke> Function *getInvokeWrapper(CallOrInvoke *CI);
+
+ bool areAllExceptionsAllowed() const { return EHWhitelistSet.empty(); }
+ bool canLongjmp(Module &M, const Value *Callee) const;
+
+ void createSetThrewFunction(Module &M);
+ void createSetTempRet0Function(Module &M);
+
+ void rebuildSSA(Function &F);
+
+public:
+ static char ID;
+
+ WebAssemblyLowerEmscriptenEHSjLj(bool EnableEH = true, bool EnableSjLj = true)
+ : ModulePass(ID), EnableEH(EnableEH), EnableSjLj(EnableSjLj),
+ ThrewGV(nullptr), ThrewValueGV(nullptr), TempRet0GV(nullptr),
+ ResumeF(nullptr), EHTypeIDF(nullptr), EmLongjmpF(nullptr),
+ EmLongjmpJmpbufF(nullptr), SaveSetjmpF(nullptr), TestSetjmpF(nullptr) {
+ EHWhitelistSet.insert(EHWhitelist.begin(), EHWhitelist.end());
+ }
+ bool runOnModule(Module &M) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<DominatorTreeWrapperPass>();
+ }
+};
+} // End anonymous namespace
+
+const char *WebAssemblyLowerEmscriptenEHSjLj::ThrewGVName = "__THREW__";
+const char *WebAssemblyLowerEmscriptenEHSjLj::ThrewValueGVName = "__threwValue";
+const char *WebAssemblyLowerEmscriptenEHSjLj::TempRet0GVName = "__tempRet0";
+const char *WebAssemblyLowerEmscriptenEHSjLj::ResumeFName = "__resumeException";
+const char *WebAssemblyLowerEmscriptenEHSjLj::EHTypeIDFName =
+ "llvm_eh_typeid_for";
+const char *WebAssemblyLowerEmscriptenEHSjLj::SetThrewFName = "setThrew";
+const char *WebAssemblyLowerEmscriptenEHSjLj::SetTempRet0FName = "setTempRet0";
+const char *WebAssemblyLowerEmscriptenEHSjLj::EmLongjmpFName =
+ "emscripten_longjmp";
+const char *WebAssemblyLowerEmscriptenEHSjLj::EmLongjmpJmpbufFName =
+ "emscripten_longjmp_jmpbuf";
+const char *WebAssemblyLowerEmscriptenEHSjLj::SaveSetjmpFName = "saveSetjmp";
+const char *WebAssemblyLowerEmscriptenEHSjLj::TestSetjmpFName = "testSetjmp";
+const char *WebAssemblyLowerEmscriptenEHSjLj::FindMatchingCatchPrefix =
+ "__cxa_find_matching_catch_";
+const char *WebAssemblyLowerEmscriptenEHSjLj::InvokePrefix = "__invoke_";
+
+char WebAssemblyLowerEmscriptenEHSjLj::ID = 0;
+INITIALIZE_PASS(WebAssemblyLowerEmscriptenEHSjLj, DEBUG_TYPE,
+ "WebAssembly Lower Emscripten Exceptions / Setjmp / Longjmp",
+ false, false)
+
+ModulePass *llvm::createWebAssemblyLowerEmscriptenEHSjLj(bool EnableEH,
+ bool EnableSjLj) {
+ return new WebAssemblyLowerEmscriptenEHSjLj(EnableEH, EnableSjLj);
+}
+
+static bool canThrow(const Value *V) {
+ if (const auto *F = dyn_cast<const Function>(V)) {
+ // Intrinsics cannot throw
+ if (F->isIntrinsic())
+ return false;
+ StringRef Name = F->getName();
+    // Leave setjmp and longjmp (mostly) alone; we process them properly later
+ if (Name == "setjmp" || Name == "longjmp")
+ return false;
+ return !F->doesNotThrow();
+ }
+  // Not a function, so this is an indirect call; it can throw, and we can't tell
+ return true;
+}
+
+// Returns an available name for a global value.
+// If the proposed name already exists in the module, adds '_' at the end of
+// the name until the name is available.
+static inline std::string createGlobalValueName(const Module &M,
+ const std::string &Propose) {
+ std::string Name = Propose;
+ while (M.getNamedGlobal(Name))
+ Name += "_";
+ return Name;
+}
+
+// Simple function name mangler.
+// This function simply takes LLVM's string representation of parameter types
+// and concatenates them with '_'. The result contains non-alphanumeric
+// characters, but llc is OK with that, and we need to postprocess these names
+// after the lowering phase anyway.
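+// For example (a sketch; the exact spellings come from LLVM's type printer):
+// a callee of type "i32 (i32, double)" would mangle to "i32_i32_double", and
+// a vararg callee "void (i8*, ...)" to "void_i8*_...".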
+static std::string getSignature(FunctionType *FTy) {
+ std::string Sig;
+ raw_string_ostream OS(Sig);
+ OS << *FTy->getReturnType();
+ for (Type *ParamTy : FTy->params())
+ OS << "_" << *ParamTy;
+ if (FTy->isVarArg())
+ OS << "_...";
+ Sig = OS.str();
+ Sig.erase(remove_if(Sig, isspace), Sig.end());
+  // When s2wasm parses a .s file, a comma means the end of an argument. So a
+ // mangled function name can contain any character but a comma.
+ std::replace(Sig.begin(), Sig.end(), ',', '.');
+ return Sig;
+}
+
+// Returns the __cxa_find_matching_catch_N function, where N = NumClauses + 2.
+// This is because a landingpad instruction contains two more arguments, a
+// personality function and a cleanup bit, and __cxa_find_matching_catch_N
+// functions are named after the number of arguments in the original landingpad
+// instruction.
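+// For example, a landingpad with three catch clauses maps to
+// __cxa_find_matching_catch_5.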
+Function *
+WebAssemblyLowerEmscriptenEHSjLj::getFindMatchingCatch(Module &M,
+ unsigned NumClauses) {
+ if (FindMatchingCatches.count(NumClauses))
+ return FindMatchingCatches[NumClauses];
+ PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
+ SmallVector<Type *, 16> Args(NumClauses, Int8PtrTy);
+ FunctionType *FTy = FunctionType::get(Int8PtrTy, Args, false);
+ Function *F =
+ Function::Create(FTy, GlobalValue::ExternalLinkage,
+ FindMatchingCatchPrefix + Twine(NumClauses + 2), &M);
+ FindMatchingCatches[NumClauses] = F;
+ return F;
+}
+
+// Generate an invoke wrapper sequence with preamble and postamble
+// Preamble:
+// __THREW__ = 0;
+// Postamble:
+// %__THREW__.val = __THREW__; __THREW__ = 0;
+// Returns %__THREW__.val, which indicates whether an exception is thrown (or
+// whether longjmp occurred), for future use.
+template <typename CallOrInvoke>
+Value *WebAssemblyLowerEmscriptenEHSjLj::wrapInvoke(CallOrInvoke *CI) {
+ LLVMContext &C = CI->getModule()->getContext();
+
+ // If we are calling a function that is noreturn, we must remove that
+ // attribute. The code we insert here does expect it to return, after we
+ // catch the exception.
+ if (CI->doesNotReturn()) {
+ if (auto *F = dyn_cast<Function>(CI->getCalledValue()))
+ F->removeFnAttr(Attribute::NoReturn);
+ CI->removeAttribute(AttributeList::FunctionIndex, Attribute::NoReturn);
+ }
+
+ IRBuilder<> IRB(C);
+ IRB.SetInsertPoint(CI);
+
+ // Pre-invoke
+ // __THREW__ = 0;
+ IRB.CreateStore(IRB.getInt32(0), ThrewGV);
+
+ // Invoke function wrapper in JavaScript
+ SmallVector<Value *, 16> Args;
+  // Put the pointer to the callee as the first argument, so it can be called
+  // within the invoke wrapper later
+ Args.push_back(CI->getCalledValue());
+ Args.append(CI->arg_begin(), CI->arg_end());
+ CallInst *NewCall = IRB.CreateCall(getInvokeWrapper(CI), Args);
+ NewCall->takeName(CI);
+ NewCall->setCallingConv(CI->getCallingConv());
+ NewCall->setDebugLoc(CI->getDebugLoc());
+
+  // Because we added the pointer to the callee as the first argument, all
+ // argument attribute indices have to be incremented by one.
+ SmallVector<AttributeSet, 8> ArgAttributes;
+ const AttributeList &InvokeAL = CI->getAttributes();
+
+ // No attributes for the callee pointer.
+ ArgAttributes.push_back(AttributeSet());
+ // Copy the argument attributes from the original
+ for (unsigned i = 0, e = CI->getNumArgOperands(); i < e; ++i)
+ ArgAttributes.push_back(InvokeAL.getParamAttributes(i));
+
+  // Reconstruct the AttributeList based on the vector we constructed.
+ AttributeList NewCallAL =
+ AttributeList::get(C, InvokeAL.getFnAttributes(),
+ InvokeAL.getRetAttributes(), ArgAttributes);
+ NewCall->setAttributes(NewCallAL);
+
+ CI->replaceAllUsesWith(NewCall);
+
+ // Post-invoke
+ // %__THREW__.val = __THREW__; __THREW__ = 0;
+ Value *Threw = IRB.CreateLoad(ThrewGV, ThrewGV->getName() + ".val");
+ IRB.CreateStore(IRB.getInt32(0), ThrewGV);
+ return Threw;
+}
+
+// Get matching invoke wrapper based on callee signature
+template <typename CallOrInvoke>
+Function *WebAssemblyLowerEmscriptenEHSjLj::getInvokeWrapper(CallOrInvoke *CI) {
+ Module *M = CI->getModule();
+ SmallVector<Type *, 16> ArgTys;
+ Value *Callee = CI->getCalledValue();
+ FunctionType *CalleeFTy;
+ if (auto *F = dyn_cast<Function>(Callee))
+ CalleeFTy = F->getFunctionType();
+ else {
+ auto *CalleeTy = cast<PointerType>(Callee->getType())->getElementType();
+ CalleeFTy = dyn_cast<FunctionType>(CalleeTy);
+ }
+
+ std::string Sig = getSignature(CalleeFTy);
+ if (InvokeWrappers.find(Sig) != InvokeWrappers.end())
+ return InvokeWrappers[Sig];
+
+  // Put the pointer to the callee as the first argument
+ ArgTys.push_back(PointerType::getUnqual(CalleeFTy));
+ // Add argument types
+ ArgTys.append(CalleeFTy->param_begin(), CalleeFTy->param_end());
+
+ FunctionType *FTy = FunctionType::get(CalleeFTy->getReturnType(), ArgTys,
+ CalleeFTy->isVarArg());
+ Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
+ InvokePrefix + Sig, M);
+ InvokeWrappers[Sig] = F;
+ return F;
+}
+
+bool WebAssemblyLowerEmscriptenEHSjLj::canLongjmp(Module &M,
+ const Value *Callee) const {
+ if (auto *CalleeF = dyn_cast<Function>(Callee))
+ if (CalleeF->isIntrinsic())
+ return false;
+
+ // The reason we include malloc/free here is to exclude the malloc/free
+ // calls generated in setjmp prep / cleanup routines.
+ Function *SetjmpF = M.getFunction("setjmp");
+ Function *MallocF = M.getFunction("malloc");
+ Function *FreeF = M.getFunction("free");
+ if (Callee == SetjmpF || Callee == MallocF || Callee == FreeF)
+ return false;
+
+  // These are functions in JS glue code
+ if (Callee == ResumeF || Callee == EHTypeIDF || Callee == SaveSetjmpF ||
+ Callee == TestSetjmpF)
+ return false;
+
+ // __cxa_find_matching_catch_N functions cannot longjmp
+ if (Callee->getName().startswith(FindMatchingCatchPrefix))
+ return false;
+
+ // Exception-catching related functions
+ Function *BeginCatchF = M.getFunction("__cxa_begin_catch");
+ Function *EndCatchF = M.getFunction("__cxa_end_catch");
+ Function *AllocExceptionF = M.getFunction("__cxa_allocate_exception");
+ Function *ThrowF = M.getFunction("__cxa_throw");
+ Function *TerminateF = M.getFunction("__clang_call_terminate");
+ if (Callee == BeginCatchF || Callee == EndCatchF ||
+ Callee == AllocExceptionF || Callee == ThrowF || Callee == TerminateF)
+ return false;
+
+ // Otherwise we don't know
+ return true;
+}
+
+// Generate a testSetjmp function call sequence with preamble and postamble.
+// The code this generates is equivalent to the following JavaScript code:
+// if (%__THREW__.val != 0 & threwValue != 0) {
+// %label = _testSetjmp(mem[%__THREW__.val], setjmpTable, setjmpTableSize);
+// if (%label == 0)
+// emscripten_longjmp(%__THREW__.val, threwValue);
+// __tempRet0 = threwValue;
+// } else {
+// %label = -1;
+// }
+// %longjmp_result = __tempRet0;
+//
+// As output parameters, it returns %label, %longjmp_result, and the BB the
+// last instruction (%longjmp_result = ...) is in.
+void WebAssemblyLowerEmscriptenEHSjLj::wrapTestSetjmp(
+ BasicBlock *BB, Instruction *InsertPt, Value *Threw, Value *SetjmpTable,
+ Value *SetjmpTableSize, Value *&Label, Value *&LongjmpResult,
+ BasicBlock *&EndBB) {
+ Function *F = BB->getParent();
+ LLVMContext &C = BB->getModule()->getContext();
+ IRBuilder<> IRB(C);
+ IRB.SetInsertPoint(InsertPt);
+
+ // if (%__THREW__.val != 0 & threwValue != 0)
+ IRB.SetInsertPoint(BB);
+ BasicBlock *ThenBB1 = BasicBlock::Create(C, "if.then1", F);
+ BasicBlock *ElseBB1 = BasicBlock::Create(C, "if.else1", F);
+ BasicBlock *EndBB1 = BasicBlock::Create(C, "if.end", F);
+ Value *ThrewCmp = IRB.CreateICmpNE(Threw, IRB.getInt32(0));
+ Value *ThrewValue =
+ IRB.CreateLoad(ThrewValueGV, ThrewValueGV->getName() + ".val");
+ Value *ThrewValueCmp = IRB.CreateICmpNE(ThrewValue, IRB.getInt32(0));
+ Value *Cmp1 = IRB.CreateAnd(ThrewCmp, ThrewValueCmp, "cmp1");
+ IRB.CreateCondBr(Cmp1, ThenBB1, ElseBB1);
+
+ // %label = _testSetjmp(mem[%__THREW__.val], _setjmpTable, _setjmpTableSize);
+ // if (%label == 0)
+ IRB.SetInsertPoint(ThenBB1);
+ BasicBlock *ThenBB2 = BasicBlock::Create(C, "if.then2", F);
+ BasicBlock *EndBB2 = BasicBlock::Create(C, "if.end2", F);
+ Value *ThrewInt = IRB.CreateIntToPtr(Threw, Type::getInt32PtrTy(C),
+ Threw->getName() + ".i32p");
+ Value *LoadedThrew =
+ IRB.CreateLoad(ThrewInt, ThrewInt->getName() + ".loaded");
+ Value *ThenLabel = IRB.CreateCall(
+ TestSetjmpF, {LoadedThrew, SetjmpTable, SetjmpTableSize}, "label");
+ Value *Cmp2 = IRB.CreateICmpEQ(ThenLabel, IRB.getInt32(0));
+ IRB.CreateCondBr(Cmp2, ThenBB2, EndBB2);
+
+ // emscripten_longjmp(%__THREW__.val, threwValue);
+ IRB.SetInsertPoint(ThenBB2);
+ IRB.CreateCall(EmLongjmpF, {Threw, ThrewValue});
+ IRB.CreateUnreachable();
+
+ // __tempRet0 = threwValue;
+ IRB.SetInsertPoint(EndBB2);
+ IRB.CreateStore(ThrewValue, TempRet0GV);
+ IRB.CreateBr(EndBB1);
+
+ IRB.SetInsertPoint(ElseBB1);
+ IRB.CreateBr(EndBB1);
+
+ // longjmp_result = __tempRet0;
+ IRB.SetInsertPoint(EndBB1);
+ PHINode *LabelPHI = IRB.CreatePHI(IRB.getInt32Ty(), 2, "label");
+ LabelPHI->addIncoming(ThenLabel, EndBB2);
+
+ LabelPHI->addIncoming(IRB.getInt32(-1), ElseBB1);
+
+ // Output parameter assignment
+ Label = LabelPHI;
+ EndBB = EndBB1;
+ LongjmpResult = IRB.CreateLoad(TempRet0GV, "longjmp_result");
+}
+
+// Create setThrew function
+// function setThrew(threw, value) {
+// if (__THREW__ == 0) {
+// __THREW__ = threw;
+// __threwValue = value;
+// }
+// }
+void WebAssemblyLowerEmscriptenEHSjLj::createSetThrewFunction(Module &M) {
+ LLVMContext &C = M.getContext();
+ IRBuilder<> IRB(C);
+
+ assert(!M.getNamedGlobal(SetThrewFName) && "setThrew already exists");
+ Type *Params[] = {IRB.getInt32Ty(), IRB.getInt32Ty()};
+ FunctionType *FTy = FunctionType::get(IRB.getVoidTy(), Params, false);
+ Function *F =
+ Function::Create(FTy, GlobalValue::ExternalLinkage, SetThrewFName, &M);
+ Argument *Arg1 = &*(F->arg_begin());
+ Argument *Arg2 = &*std::next(F->arg_begin());
+ Arg1->setName("threw");
+ Arg2->setName("value");
+ BasicBlock *EntryBB = BasicBlock::Create(C, "entry", F);
+ BasicBlock *ThenBB = BasicBlock::Create(C, "if.then", F);
+ BasicBlock *EndBB = BasicBlock::Create(C, "if.end", F);
+
+ IRB.SetInsertPoint(EntryBB);
+ Value *Threw = IRB.CreateLoad(ThrewGV, ThrewGV->getName() + ".val");
+ Value *Cmp = IRB.CreateICmpEQ(Threw, IRB.getInt32(0), "cmp");
+ IRB.CreateCondBr(Cmp, ThenBB, EndBB);
+
+ IRB.SetInsertPoint(ThenBB);
+ IRB.CreateStore(Arg1, ThrewGV);
+ IRB.CreateStore(Arg2, ThrewValueGV);
+ IRB.CreateBr(EndBB);
+
+ IRB.SetInsertPoint(EndBB);
+ IRB.CreateRetVoid();
+}
+
+// Create setTempRet0 function
+// function setTempRet0(value) {
+// __tempRet0 = value;
+// }
+void WebAssemblyLowerEmscriptenEHSjLj::createSetTempRet0Function(Module &M) {
+ LLVMContext &C = M.getContext();
+ IRBuilder<> IRB(C);
+
+ assert(!M.getNamedGlobal(SetTempRet0FName) && "setTempRet0 already exists");
+ Type *Params[] = {IRB.getInt32Ty()};
+ FunctionType *FTy = FunctionType::get(IRB.getVoidTy(), Params, false);
+ Function *F =
+ Function::Create(FTy, GlobalValue::ExternalLinkage, SetTempRet0FName, &M);
+ F->arg_begin()->setName("value");
+ BasicBlock *EntryBB = BasicBlock::Create(C, "entry", F);
+ IRB.SetInsertPoint(EntryBB);
+ IRB.CreateStore(&*F->arg_begin(), TempRet0GV);
+ IRB.CreateRetVoid();
+}
+
+void WebAssemblyLowerEmscriptenEHSjLj::rebuildSSA(Function &F) {
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
+ DT.recalculate(F); // CFG has been changed
+ SSAUpdater SSA;
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ for (auto UI = I.use_begin(), UE = I.use_end(); UI != UE;) {
+ Use &U = *UI;
+ ++UI;
+ SSA.Initialize(I.getType(), I.getName());
+ SSA.AddAvailableValue(&BB, &I);
+ Instruction *User = cast<Instruction>(U.getUser());
+ if (User->getParent() == &BB)
+ continue;
+
+ if (PHINode *UserPN = dyn_cast<PHINode>(User))
+ if (UserPN->getIncomingBlock(U) == &BB)
+ continue;
+
+ if (DT.dominates(&I, User))
+ continue;
+ SSA.RewriteUseAfterInsertions(U);
+ }
+ }
+ }
+}
+
+bool WebAssemblyLowerEmscriptenEHSjLj::runOnModule(Module &M) {
+ LLVMContext &C = M.getContext();
+ IRBuilder<> IRB(C);
+
+ Function *SetjmpF = M.getFunction("setjmp");
+ Function *LongjmpF = M.getFunction("longjmp");
+ bool SetjmpUsed = SetjmpF && !SetjmpF->use_empty();
+ bool LongjmpUsed = LongjmpF && !LongjmpF->use_empty();
+ bool DoSjLj = EnableSjLj && (SetjmpUsed || LongjmpUsed);
+
+  // Create global variables __THREW__, __threwValue, and __tempRet0, which are
+ // used in common for both exception handling and setjmp/longjmp handling
+ ThrewGV = new GlobalVariable(M, IRB.getInt32Ty(), false,
+ GlobalValue::ExternalLinkage, IRB.getInt32(0),
+ createGlobalValueName(M, ThrewGVName));
+ ThrewValueGV = new GlobalVariable(
+ M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage, IRB.getInt32(0),
+ createGlobalValueName(M, ThrewValueGVName));
+ TempRet0GV = new GlobalVariable(M, IRB.getInt32Ty(), false,
+ GlobalValue::ExternalLinkage, IRB.getInt32(0),
+ createGlobalValueName(M, TempRet0GVName));
+
+ bool Changed = false;
+
+ // Exception handling
+ if (EnableEH) {
+ // Register __resumeException function
+ FunctionType *ResumeFTy =
+ FunctionType::get(IRB.getVoidTy(), IRB.getInt8PtrTy(), false);
+ ResumeF = Function::Create(ResumeFTy, GlobalValue::ExternalLinkage,
+ ResumeFName, &M);
+
+ // Register llvm_eh_typeid_for function
+ FunctionType *EHTypeIDTy =
+ FunctionType::get(IRB.getInt32Ty(), IRB.getInt8PtrTy(), false);
+ EHTypeIDF = Function::Create(EHTypeIDTy, GlobalValue::ExternalLinkage,
+ EHTypeIDFName, &M);
+
+ for (Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+ Changed |= runEHOnFunction(F);
+ }
+ }
+
+ // Setjmp/longjmp handling
+ if (DoSjLj) {
+ Changed = true; // We have setjmp or longjmp somewhere
+
+ Function *MallocF = M.getFunction("malloc");
+ Function *FreeF = M.getFunction("free");
+ if (!MallocF || !FreeF)
+ report_fatal_error(
+ "malloc and free must be linked into the module if setjmp is used");
+
+ // Register saveSetjmp function
+ FunctionType *SetjmpFTy = SetjmpF->getFunctionType();
+ SmallVector<Type *, 4> Params = {SetjmpFTy->getParamType(0),
+ IRB.getInt32Ty(), Type::getInt32PtrTy(C),
+ IRB.getInt32Ty()};
+ FunctionType *FTy =
+ FunctionType::get(Type::getInt32PtrTy(C), Params, false);
+ SaveSetjmpF = Function::Create(FTy, GlobalValue::ExternalLinkage,
+ SaveSetjmpFName, &M);
+
+ // Register testSetjmp function
+ Params = {IRB.getInt32Ty(), Type::getInt32PtrTy(C), IRB.getInt32Ty()};
+ FTy = FunctionType::get(IRB.getInt32Ty(), Params, false);
+ TestSetjmpF = Function::Create(FTy, GlobalValue::ExternalLinkage,
+ TestSetjmpFName, &M);
+
+ if (LongjmpF) {
+ // Replace all uses of longjmp with emscripten_longjmp_jmpbuf, which is
+ // defined in JS code
+ EmLongjmpJmpbufF = Function::Create(LongjmpF->getFunctionType(),
+ GlobalValue::ExternalLinkage,
+ EmLongjmpJmpbufFName, &M);
+
+ LongjmpF->replaceAllUsesWith(EmLongjmpJmpbufF);
+ }
+ FTy = FunctionType::get(IRB.getVoidTy(),
+ {IRB.getInt32Ty(), IRB.getInt32Ty()}, false);
+ EmLongjmpF =
+ Function::Create(FTy, GlobalValue::ExternalLinkage, EmLongjmpFName, &M);
+
+    // Only traverse functions that use setjmp in order not to insert
+ // unnecessary prep / cleanup code in every function
+ SmallPtrSet<Function *, 8> SetjmpUsers;
+ for (User *U : SetjmpF->users()) {
+ auto *UI = cast<Instruction>(U);
+ SetjmpUsers.insert(UI->getFunction());
+ }
+ for (Function *F : SetjmpUsers)
+ runSjLjOnFunction(*F);
+ }
+
+ if (!Changed) {
+ // Delete unused global variables and functions
+ ThrewGV->eraseFromParent();
+ ThrewValueGV->eraseFromParent();
+ TempRet0GV->eraseFromParent();
+ if (ResumeF)
+ ResumeF->eraseFromParent();
+ if (EHTypeIDF)
+ EHTypeIDF->eraseFromParent();
+ if (EmLongjmpF)
+ EmLongjmpF->eraseFromParent();
+ if (SaveSetjmpF)
+ SaveSetjmpF->eraseFromParent();
+ if (TestSetjmpF)
+ TestSetjmpF->eraseFromParent();
+ return false;
+ }
+
+ // If we have made any changes while doing exception handling or
+ // setjmp/longjmp handling, we have to create these functions for JavaScript
+ // to call.
+ createSetThrewFunction(M);
+ createSetTempRet0Function(M);
+
+ return true;
+}
+
+bool WebAssemblyLowerEmscriptenEHSjLj::runEHOnFunction(Function &F) {
+ Module &M = *F.getParent();
+ LLVMContext &C = F.getContext();
+ IRBuilder<> IRB(C);
+ bool Changed = false;
+ SmallVector<Instruction *, 64> ToErase;
+ SmallPtrSet<LandingPadInst *, 32> LandingPads;
+ bool AllowExceptions =
+ areAllExceptionsAllowed() || EHWhitelistSet.count(F.getName());
+
+ for (BasicBlock &BB : F) {
+ auto *II = dyn_cast<InvokeInst>(BB.getTerminator());
+ if (!II)
+ continue;
+ Changed = true;
+ LandingPads.insert(II->getLandingPadInst());
+ IRB.SetInsertPoint(II);
+
+ bool NeedInvoke = AllowExceptions && canThrow(II->getCalledValue());
+ if (NeedInvoke) {
+ // Wrap invoke with invoke wrapper and generate preamble/postamble
+ Value *Threw = wrapInvoke(II);
+ ToErase.push_back(II);
+
+ // Insert a branch based on __THREW__ variable
+ Value *Cmp = IRB.CreateICmpEQ(Threw, IRB.getInt32(1), "cmp");
+ IRB.CreateCondBr(Cmp, II->getUnwindDest(), II->getNormalDest());
+
+ } else {
+ // This can't throw, and we don't need this invoke, just replace it with a
+ // call+branch
+ SmallVector<Value *, 16> Args(II->arg_begin(), II->arg_end());
+ CallInst *NewCall = IRB.CreateCall(II->getCalledValue(), Args);
+ NewCall->takeName(II);
+ NewCall->setCallingConv(II->getCallingConv());
+ NewCall->setDebugLoc(II->getDebugLoc());
+ NewCall->setAttributes(II->getAttributes());
+ II->replaceAllUsesWith(NewCall);
+ ToErase.push_back(II);
+
+ IRB.CreateBr(II->getNormalDest());
+
+ // Remove any PHI node entries from the exception destination
+ II->getUnwindDest()->removePredecessor(&BB);
+ }
+ }
+
+ // Process resume instructions
+ for (BasicBlock &BB : F) {
+ // Scan the body of the basic block for resumes
+ for (Instruction &I : BB) {
+ auto *RI = dyn_cast<ResumeInst>(&I);
+ if (!RI)
+ continue;
+
+ // Split the input into legal values
+ Value *Input = RI->getValue();
+ IRB.SetInsertPoint(RI);
+ Value *Low = IRB.CreateExtractValue(Input, 0, "low");
+ // Create a call to __resumeException function
+ IRB.CreateCall(ResumeF, {Low});
+ // Add a terminator to the block
+ IRB.CreateUnreachable();
+ ToErase.push_back(RI);
+ }
+ }
+
+ // Process llvm.eh.typeid.for intrinsics
+ for (BasicBlock &BB : F) {
+ for (Instruction &I : BB) {
+ auto *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+ const Function *Callee = CI->getCalledFunction();
+ if (!Callee)
+ continue;
+ if (Callee->getIntrinsicID() != Intrinsic::eh_typeid_for)
+ continue;
+
+ IRB.SetInsertPoint(CI);
+ CallInst *NewCI =
+ IRB.CreateCall(EHTypeIDF, CI->getArgOperand(0), "typeid");
+ CI->replaceAllUsesWith(NewCI);
+ ToErase.push_back(CI);
+ }
+ }
+
+ // Look for orphan landingpads, can occur in blocks with no predecessors
+ for (BasicBlock &BB : F) {
+ Instruction *I = BB.getFirstNonPHI();
+ if (auto *LPI = dyn_cast<LandingPadInst>(I))
+ LandingPads.insert(LPI);
+ }
+
+  // Handle all the landingpads for this function together, as multiple invokes
+ // may share a single lp
+ for (LandingPadInst *LPI : LandingPads) {
+ IRB.SetInsertPoint(LPI);
+ SmallVector<Value *, 16> FMCArgs;
+ for (unsigned i = 0, e = LPI->getNumClauses(); i < e; ++i) {
+ Constant *Clause = LPI->getClause(i);
+ // As a temporary workaround for the lack of aggregate varargs support
+ // in the interface between JS and wasm, break out filter operands into
+ // their component elements.
+ if (LPI->isFilter(i)) {
+ auto *ATy = cast<ArrayType>(Clause->getType());
+ for (unsigned j = 0, e = ATy->getNumElements(); j < e; ++j) {
+ Value *EV = IRB.CreateExtractValue(Clause, makeArrayRef(j), "filter");
+ FMCArgs.push_back(EV);
+ }
+ } else
+ FMCArgs.push_back(Clause);
+ }
+
+ // Create a call to __cxa_find_matching_catch_N function
+ Function *FMCF = getFindMatchingCatch(M, FMCArgs.size());
+ CallInst *FMCI = IRB.CreateCall(FMCF, FMCArgs, "fmc");
+ Value *Undef = UndefValue::get(LPI->getType());
+ Value *Pair0 = IRB.CreateInsertValue(Undef, FMCI, 0, "pair0");
+ Value *TempRet0 =
+ IRB.CreateLoad(TempRet0GV, TempRet0GV->getName() + ".val");
+ Value *Pair1 = IRB.CreateInsertValue(Pair0, TempRet0, 1, "pair1");
+
+ LPI->replaceAllUsesWith(Pair1);
+ ToErase.push_back(LPI);
+ }
+
+ // Erase everything we no longer need in this function
+ for (Instruction *I : ToErase)
+ I->eraseFromParent();
+
+ return Changed;
+}
+
+bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
+ Module &M = *F.getParent();
+ LLVMContext &C = F.getContext();
+ IRBuilder<> IRB(C);
+ SmallVector<Instruction *, 64> ToErase;
+ // Vector of %setjmpTable values
+ std::vector<Instruction *> SetjmpTableInsts;
+ // Vector of %setjmpTableSize values
+ std::vector<Instruction *> SetjmpTableSizeInsts;
+
+ // Setjmp preparation
+
+ // This instruction effectively means %setjmpTableSize = 4.
+ // We create this as an instruction intentionally, and we don't want to fold
+ // this instruction to a constant 4, because this value will be used in
+ // SSAUpdater.AddAvailableValue(...) later.
+ BasicBlock &EntryBB = F.getEntryBlock();
+ BinaryOperator *SetjmpTableSize = BinaryOperator::Create(
+ Instruction::Add, IRB.getInt32(4), IRB.getInt32(0), "setjmpTableSize",
+ &*EntryBB.getFirstInsertionPt());
+ // setjmpTable = (int *) malloc(40);
+ Instruction *SetjmpTable = CallInst::CreateMalloc(
+ SetjmpTableSize, IRB.getInt32Ty(), IRB.getInt32Ty(), IRB.getInt32(40),
+ nullptr, nullptr, "setjmpTable");
+ // setjmpTable[0] = 0;
+ IRB.SetInsertPoint(SetjmpTableSize);
+ IRB.CreateStore(IRB.getInt32(0), SetjmpTable);
+ SetjmpTableInsts.push_back(SetjmpTable);
+ SetjmpTableSizeInsts.push_back(SetjmpTableSize);
+
+ // Setjmp transformation
+ std::vector<PHINode *> SetjmpRetPHIs;
+ Function *SetjmpF = M.getFunction("setjmp");
+ for (User *U : SetjmpF->users()) {
+ auto *CI = dyn_cast<CallInst>(U);
+ if (!CI)
+ report_fatal_error("Does not support indirect calls to setjmp");
+
+ BasicBlock *BB = CI->getParent();
+ if (BB->getParent() != &F) // in other function
+ continue;
+
+ // The tail is everything right after the call, and will be reached once
+    // when setjmp is called, and later when a longjmp returns to the setjmp
+ BasicBlock *Tail = SplitBlock(BB, CI->getNextNode());
+    // Add a phi to the tail that will be the output of setjmp, indicating
+    // whether this is the first call or a longjmp back. The phi directly
+    // uses the right value based on where we arrive from
+ IRB.SetInsertPoint(Tail->getFirstNonPHI());
+ PHINode *SetjmpRet = IRB.CreatePHI(IRB.getInt32Ty(), 2, "setjmp.ret");
+
+ // setjmp initial call returns 0
+ SetjmpRet->addIncoming(IRB.getInt32(0), BB);
+ // The proper output is now this, not the setjmp call itself
+ CI->replaceAllUsesWith(SetjmpRet);
+    // longjmps that return to this setjmp will add themselves to this phi
+ SetjmpRetPHIs.push_back(SetjmpRet);
+
+ // Fix call target
+ // Our index in the function is our place in the array + 1 to avoid index
+ // 0, because index 0 means the longjmp is not ours to handle.
+ IRB.SetInsertPoint(CI);
+ Value *Args[] = {CI->getArgOperand(0), IRB.getInt32(SetjmpRetPHIs.size()),
+ SetjmpTable, SetjmpTableSize};
+ Instruction *NewSetjmpTable =
+ IRB.CreateCall(SaveSetjmpF, Args, "setjmpTable");
+ Instruction *NewSetjmpTableSize =
+ IRB.CreateLoad(TempRet0GV, "setjmpTableSize");
+ SetjmpTableInsts.push_back(NewSetjmpTable);
+ SetjmpTableSizeInsts.push_back(NewSetjmpTableSize);
+ ToErase.push_back(CI);
+ }
+
+ // Update each call that can longjmp so it can return to a setjmp where
+ // relevant.
+
+ // Because we are creating new BBs while processing and don't want to make
+ // all these newly created BBs candidates again for longjmp processing, we
+ // first make the vector of candidate BBs.
+ std::vector<BasicBlock *> BBs;
+ for (BasicBlock &BB : F)
+ BBs.push_back(&BB);
+
+ // BBs.size() will change within the loop, so we query it every time
+ for (unsigned i = 0; i < BBs.size(); i++) {
+ BasicBlock *BB = BBs[i];
+ for (Instruction &I : *BB) {
+ assert(!isa<InvokeInst>(&I));
+ auto *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+
+ const Value *Callee = CI->getCalledValue();
+ if (!canLongjmp(M, Callee))
+ continue;
+
+ Value *Threw = nullptr;
+ BasicBlock *Tail;
+ if (Callee->getName().startswith(InvokePrefix)) {
+      // If an invoke wrapper has already been generated for this call in the
+      // previous EH phase, search for the load instruction
+ // %__THREW__.val = __THREW__;
+ // in postamble after the invoke wrapper call
+ LoadInst *ThrewLI = nullptr;
+ StoreInst *ThrewResetSI = nullptr;
+ for (auto I = std::next(BasicBlock::iterator(CI)), IE = BB->end();
+ I != IE; ++I) {
+ if (auto *LI = dyn_cast<LoadInst>(I))
+ if (auto *GV = dyn_cast<GlobalVariable>(LI->getPointerOperand()))
+ if (GV == ThrewGV) {
+ Threw = ThrewLI = LI;
+ break;
+ }
+ }
+ // Search for the store instruction after the load above
+ // __THREW__ = 0;
+ for (auto I = std::next(BasicBlock::iterator(ThrewLI)), IE = BB->end();
+ I != IE; ++I) {
+ if (auto *SI = dyn_cast<StoreInst>(I))
+ if (auto *GV = dyn_cast<GlobalVariable>(SI->getPointerOperand()))
+ if (GV == ThrewGV && SI->getValueOperand() == IRB.getInt32(0)) {
+ ThrewResetSI = SI;
+ break;
+ }
+ }
+ assert(Threw && ThrewLI && "Cannot find __THREW__ load after invoke");
+ assert(ThrewResetSI && "Cannot find __THREW__ store after invoke");
+ Tail = SplitBlock(BB, ThrewResetSI->getNextNode());
+
+ } else {
+ // Wrap call with invoke wrapper and generate preamble/postamble
+ Threw = wrapInvoke(CI);
+ ToErase.push_back(CI);
+ Tail = SplitBlock(BB, CI->getNextNode());
+ }
+
+ // We need to replace the terminator in Tail - SplitBlock makes BB go
+      // straight to Tail; we need to check if a longjmp occurred, and go to the
+ // right setjmp-tail if so
+ ToErase.push_back(BB->getTerminator());
+
+ // Generate a function call to testSetjmp function and preamble/postamble
+ // code to figure out (1) whether longjmp occurred (2) if longjmp
+ // occurred, which setjmp it corresponds to
+ Value *Label = nullptr;
+ Value *LongjmpResult = nullptr;
+ BasicBlock *EndBB = nullptr;
+ wrapTestSetjmp(BB, CI, Threw, SetjmpTable, SetjmpTableSize, Label,
+ LongjmpResult, EndBB);
+ assert(Label && LongjmpResult && EndBB);
+
+ // Create switch instruction
+ IRB.SetInsertPoint(EndBB);
+ SwitchInst *SI = IRB.CreateSwitch(Label, Tail, SetjmpRetPHIs.size());
+      // -1 means no longjmp happened, continue normally (will hit the default
+      // switch case). 0 means a longjmp that is not ours to handle, needs a
+      // rethrow. Otherwise the label is the setjmp's index in SetjmpRetPHIs
+      // plus 1 (to avoid 0).
+ for (unsigned i = 0; i < SetjmpRetPHIs.size(); i++) {
+ SI->addCase(IRB.getInt32(i + 1), SetjmpRetPHIs[i]->getParent());
+ SetjmpRetPHIs[i]->addIncoming(LongjmpResult, EndBB);
+ }
+
+      // We are splitting the block here, and must continue to find other calls
+      // in the block, which is now split, so continue to traverse in the Tail
+ BBs.push_back(Tail);
+ }
+ }
+
+ // Erase everything we no longer need in this function
+ for (Instruction *I : ToErase)
+ I->eraseFromParent();
+
+ // Free setjmpTable buffer before each return instruction
+ for (BasicBlock &BB : F) {
+ TerminatorInst *TI = BB.getTerminator();
+ if (isa<ReturnInst>(TI))
+ CallInst::CreateFree(SetjmpTable, TI);
+ }
+
+ // Every call to saveSetjmp can change setjmpTable and setjmpTableSize
+ // (when buffer reallocation occurs)
+ // entry:
+ // setjmpTableSize = 4;
+ // setjmpTable = (int *) malloc(40);
+ // setjmpTable[0] = 0;
+ // ...
+ // somebb:
+ // setjmpTable = saveSetjmp(buf, label, setjmpTable, setjmpTableSize);
+ // setjmpTableSize = __tempRet0;
+ // So we need to make sure the SSA for these variables is valid so that every
+  // saveSetjmp and testSetjmp call has the correct arguments.
+ SSAUpdater SetjmpTableSSA;
+ SSAUpdater SetjmpTableSizeSSA;
+ SetjmpTableSSA.Initialize(Type::getInt32PtrTy(C), "setjmpTable");
+ SetjmpTableSizeSSA.Initialize(Type::getInt32Ty(C), "setjmpTableSize");
+ for (Instruction *I : SetjmpTableInsts)
+ SetjmpTableSSA.AddAvailableValue(I->getParent(), I);
+ for (Instruction *I : SetjmpTableSizeInsts)
+ SetjmpTableSizeSSA.AddAvailableValue(I->getParent(), I);
+
+ for (auto UI = SetjmpTable->use_begin(), UE = SetjmpTable->use_end();
+ UI != UE;) {
+ // Grab the use before incrementing the iterator.
+ Use &U = *UI;
+ // Increment the iterator before removing the use from the list.
+ ++UI;
+ if (Instruction *I = dyn_cast<Instruction>(U.getUser()))
+ if (I->getParent() != &EntryBB)
+ SetjmpTableSSA.RewriteUse(U);
+ }
+ for (auto UI = SetjmpTableSize->use_begin(), UE = SetjmpTableSize->use_end();
+ UI != UE;) {
+ Use &U = *UI;
+ ++UI;
+ if (Instruction *I = dyn_cast<Instruction>(U.getUser()))
+ if (I->getParent() != &EntryBB)
+ SetjmpTableSizeSSA.RewriteUse(U);
+ }
+
+  // Finally, our modifications to the CFG can break dominance of SSA
+  // variables. For example, in this code,
+  //   if (x()) { .. setjmp() .. }
+  //   if (y()) { .. longjmp() .. }
+  // we must split the longjmp block, and it can jump into the block split
+  // from the setjmp one. But that means that when we split the setjmp block,
+  // its first part no longer dominates its second part - there is a
+  // theoretically possible control flow path where x() is false, then y() is
+  // true and we reach the second part of the setjmp block, without ever
+  // reaching the first part. So, we rebuild SSA form here.
+ rebuildSSA(F);
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
new file mode 100644
index 000000000000..0020817aee41
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyLowerGlobalDtors.cpp
@@ -0,0 +1,191 @@
+//===-- WebAssemblyLowerGlobalDtors.cpp - Lower @llvm.global_dtors --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Lower @llvm.global_dtors.
+///
+/// WebAssembly doesn't have a builtin way to invoke static destructors.
+/// Implement @llvm.global_dtors by creating wrapper functions that are
+/// registered in @llvm.global_ctors and which contain a call to
+/// `__cxa_atexit` to register their destructor functions.
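+///
+/// For example (a sketch of the intended output), a destructor `dtor` at the
+/// default priority yields roughly:
+///   void call_dtors(void*) { dtor(); }
+///   void register_call_dtors() {
+///     __cxa_atexit(call_dtors, nullptr, &__dso_handle);
+///   }
+/// with register_call_dtors registered in @llvm.global_ctors.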
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-lower-global-dtors"
+
+namespace {
+class LowerGlobalDtors final : public ModulePass {
+ StringRef getPassName() const override {
+ return "WebAssembly Lower @llvm.global_dtors";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ ModulePass::getAnalysisUsage(AU);
+ }
+
+ bool runOnModule(Module &M) override;
+
+public:
+ static char ID;
+ LowerGlobalDtors() : ModulePass(ID) {}
+};
+} // End anonymous namespace
+
+char LowerGlobalDtors::ID = 0;
+ModulePass *llvm::createWebAssemblyLowerGlobalDtors() {
+ return new LowerGlobalDtors();
+}
+
+bool LowerGlobalDtors::runOnModule(Module &M) {
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_dtors");
+ if (!GV)
+ return false;
+
+ const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList)
+ return false;
+
+  // Sanity-check @llvm.global_dtors' type.
+ StructType *ETy = dyn_cast<StructType>(InitList->getType()->getElementType());
+ if (!ETy || ETy->getNumElements() != 3 ||
+ !ETy->getTypeAtIndex(0U)->isIntegerTy() ||
+ !ETy->getTypeAtIndex(1U)->isPointerTy() ||
+ !ETy->getTypeAtIndex(2U)->isPointerTy())
+ return false; // Not (int, ptr, ptr).
+
+ // Collect the contents of @llvm.global_dtors, collated by priority and
+ // associated symbol.
+ std::map<uint16_t, MapVector<Constant *, std::vector<Constant *> > > DtorFuncs;
+ for (Value *O : InitList->operands()) {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(O);
+ if (!CS) continue; // Malformed.
+
+ ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
+ if (!Priority) continue; // Malformed.
+ uint16_t PriorityValue = Priority->getLimitedValue(UINT16_MAX);
+
+ Constant *DtorFunc = CS->getOperand(1);
+ if (DtorFunc->isNullValue())
+ break; // Found a null terminator, skip the rest.
+
+ Constant *Associated = CS->getOperand(2);
+ Associated = cast<Constant>(Associated->stripPointerCastsNoFollowAliases());
+
+ DtorFuncs[PriorityValue][Associated].push_back(DtorFunc);
+ }
+ if (DtorFuncs.empty())
+ return false;
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ LLVMContext &C = M.getContext();
+ PointerType *VoidStar = Type::getInt8PtrTy(C);
+ Type *AtExitFuncArgs[] = { VoidStar };
+ FunctionType *AtExitFuncTy = FunctionType::get(
+ Type::getVoidTy(C),
+ AtExitFuncArgs,
+ /*isVarArg=*/false);
+
+ Type *AtExitArgs[] = {
+ PointerType::get(AtExitFuncTy, 0),
+ VoidStar,
+ VoidStar
+ };
+ FunctionType *AtExitTy = FunctionType::get(
+ Type::getInt32Ty(C),
+ AtExitArgs,
+ /*isVarArg=*/false);
+ Constant *AtExit = M.getOrInsertFunction("__cxa_atexit", AtExitTy);
+
+  // Declare __dso_handle.
+ Constant *DsoHandle = M.getNamedValue("__dso_handle");
+ if (!DsoHandle) {
+ Type *DsoHandleTy = Type::getInt8Ty(C);
+ GlobalVariable *Handle =
+ new GlobalVariable(M, DsoHandleTy, /*isConstant=*/true,
+ GlobalVariable::ExternalWeakLinkage,
+ nullptr, "__dso_handle");
+ Handle->setVisibility(GlobalVariable::HiddenVisibility);
+ DsoHandle = Handle;
+ }
+
+ // For each unique priority level and associated symbol, generate a function
+ // to call all the destructors at that level, and a function to register the
+ // first function with __cxa_atexit.
+ for (auto &PriorityAndMore : DtorFuncs) {
+ uint16_t Priority = PriorityAndMore.first;
+ for (auto &AssociatedAndMore : PriorityAndMore.second) {
+ Constant *Associated = AssociatedAndMore.first;
+
+ Function *CallDtors = Function::Create(
+ AtExitFuncTy, Function::PrivateLinkage,
+ "call_dtors" +
+ (Priority != UINT16_MAX ?
+ (Twine(".") + Twine(Priority)) : Twine()) +
+ (!Associated->isNullValue() ?
+ (Twine(".") + Associated->getName()) : Twine()),
+ &M);
+ BasicBlock *BB = BasicBlock::Create(C, "body", CallDtors);
+
+ for (auto Dtor : AssociatedAndMore.second)
+ CallInst::Create(Dtor, "", BB);
+ ReturnInst::Create(C, BB);
+
+ FunctionType *VoidVoid = FunctionType::get(Type::getVoidTy(C),
+ /*isVarArg=*/false);
+ Function *RegisterCallDtors = Function::Create(
+ VoidVoid, Function::PrivateLinkage,
+ "register_call_dtors" +
+ (Priority != UINT16_MAX ?
+ (Twine(".") + Twine(Priority)) : Twine()) +
+ (!Associated->isNullValue() ?
+ (Twine(".") + Associated->getName()) : Twine()),
+ &M);
+ BasicBlock *EntryBB = BasicBlock::Create(C, "entry", RegisterCallDtors);
+ BasicBlock *FailBB = BasicBlock::Create(C, "fail", RegisterCallDtors);
+ BasicBlock *RetBB = BasicBlock::Create(C, "return", RegisterCallDtors);
+
+ Value *Null = ConstantPointerNull::get(VoidStar);
+ Value *Args[] = { CallDtors, Null, DsoHandle };
+ Value *Res = CallInst::Create(AtExit, Args, "call", EntryBB);
+ Value *Cmp = new ICmpInst(*EntryBB, ICmpInst::ICMP_NE, Res,
+ Constant::getNullValue(Res->getType()));
+ BranchInst::Create(FailBB, RetBB, Cmp, EntryBB);
+
+      // If `__cxa_atexit` fails, e.g. because it ran out of memory, trap so
+      // that we don't silently misbehave. This should be very rare: if the
+      // process is out of memory before main has even started, something is
+      // already badly wrong.
+ CallInst::Create(Intrinsic::getDeclaration(&M, Intrinsic::trap),
+ "", FailBB);
+ new UnreachableInst(C, FailBB);
+
+ ReturnInst::Create(C, RetBB);
+
+ // Now register the registration function with @llvm.global_ctors.
+ appendToGlobalCtors(M, RegisterCallDtors, Priority, Associated);
+ }
+ }
+
+ // Now that we've lowered everything, remove @llvm.global_dtors.
+ GV->eraseFromParent();
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
new file mode 100644
index 000000000000..4a93d4810c7d
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
@@ -0,0 +1,235 @@
+// WebAssemblyMCInstLower.cpp - Convert WebAssembly MachineInstr to an MCInst //
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains code to lower WebAssembly MachineInstrs to their
+/// corresponding MCInst records.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMCInstLower.h"
+#include "WebAssemblyAsmPrinter.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblyRuntimeLibcallSignatures.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCSymbolWasm.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+MCSymbol *
+WebAssemblyMCInstLower::GetGlobalAddressSymbol(const MachineOperand &MO) const {
+ const GlobalValue *Global = MO.getGlobal();
+ MCSymbol *Sym = Printer.getSymbol(Global);
+ if (isa<MCSymbolELF>(Sym))
+ return Sym;
+
+ MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym);
+
+ if (const auto *FuncTy = dyn_cast<FunctionType>(Global->getValueType())) {
+ const MachineFunction &MF = *MO.getParent()->getParent()->getParent();
+ const TargetMachine &TM = MF.getTarget();
+ const Function &CurrentFunc = MF.getFunction();
+
+ SmallVector<wasm::ValType, 4> Returns;
+ SmallVector<wasm::ValType, 4> Params;
+
+ wasm::ValType iPTR =
+ MF.getSubtarget<WebAssemblySubtarget>().hasAddr64() ?
+ wasm::ValType::I64 :
+ wasm::ValType::I32;
+
+ SmallVector<MVT, 4> ResultMVTs;
+ ComputeLegalValueVTs(CurrentFunc, TM, FuncTy->getReturnType(), ResultMVTs);
+ // WebAssembly can't currently handle returning tuples.
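+    // Multiple results are instead passed through an sret-style pointer
+    // parameter, matching ComputeSignatureVTs.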
+ if (ResultMVTs.size() <= 1)
+ for (MVT ResultMVT : ResultMVTs)
+ Returns.push_back(WebAssembly::toValType(ResultMVT));
+ else
+ Params.push_back(iPTR);
+
+ for (Type *Ty : FuncTy->params()) {
+ SmallVector<MVT, 4> ParamMVTs;
+ ComputeLegalValueVTs(CurrentFunc, TM, Ty, ParamMVTs);
+ for (MVT ParamMVT : ParamMVTs)
+ Params.push_back(WebAssembly::toValType(ParamMVT));
+ }
+
+ if (FuncTy->isVarArg())
+ Params.push_back(iPTR);
+
+ WasmSym->setReturns(std::move(Returns));
+ WasmSym->setParams(std::move(Params));
+ WasmSym->setIsFunction(true);
+ }
+
+ return WasmSym;
+}
+
+MCSymbol *WebAssemblyMCInstLower::GetExternalSymbolSymbol(
+ const MachineOperand &MO) const {
+ const char *Name = MO.getSymbolName();
+ MCSymbol *Sym = Printer.GetExternalSymbolSymbol(Name);
+ if (isa<MCSymbolELF>(Sym))
+ return Sym;
+
+ MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym);
+ const WebAssemblySubtarget &Subtarget = Printer.getSubtarget();
+
+ // __stack_pointer is a global variable; all other external symbols used by
+ // CodeGen are functions.
+ if (strcmp(Name, "__stack_pointer") == 0)
+ return WasmSym;
+
+ SmallVector<wasm::ValType, 4> Returns;
+ SmallVector<wasm::ValType, 4> Params;
+ GetSignature(Subtarget, Name, Returns, Params);
+
+ WasmSym->setReturns(std::move(Returns));
+ WasmSym->setParams(std::move(Params));
+ WasmSym->setIsFunction(true);
+
+ return WasmSym;
+}
+
+MCOperand WebAssemblyMCInstLower::LowerSymbolOperand(MCSymbol *Sym,
+ int64_t Offset,
+ bool IsFunc) const {
+ MCSymbolRefExpr::VariantKind VK =
+ IsFunc ? MCSymbolRefExpr::VK_WebAssembly_FUNCTION
+ : MCSymbolRefExpr::VK_None;
+
+ const MCExpr *Expr = MCSymbolRefExpr::create(Sym, VK, Ctx);
+
+ if (Offset != 0) {
+ if (IsFunc)
+ report_fatal_error("Function addresses with offsets not supported");
+ Expr =
+ MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(Offset, Ctx), Ctx);
+ }
+
+ return MCOperand::createExpr(Expr);
+}
+
+// Return the WebAssembly type associated with the given register class.
+static wasm::ValType getType(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return wasm::ValType::I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return wasm::ValType::I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return wasm::ValType::F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return wasm::ValType::F64;
+ llvm_unreachable("Unexpected register class");
+}
+
+void WebAssemblyMCInstLower::Lower(const MachineInstr *MI,
+ MCInst &OutMI) const {
+ OutMI.setOpcode(MI->getOpcode());
+
+ const MCInstrDesc &Desc = MI->getDesc();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+
+ MCOperand MCOp;
+ switch (MO.getType()) {
+ default:
+ MI->print(errs());
+ llvm_unreachable("unknown operand type");
+ case MachineOperand::MO_MachineBasicBlock:
+ MI->print(errs());
+ llvm_unreachable("MachineBasicBlock operand should have been rewritten");
+ case MachineOperand::MO_Register: {
+ // Ignore all implicit register operands.
+ if (MO.isImplicit())
+ continue;
+ const WebAssemblyFunctionInfo &MFI =
+ *MI->getParent()->getParent()->getInfo<WebAssemblyFunctionInfo>();
+ unsigned WAReg = MFI.getWAReg(MO.getReg());
+ MCOp = MCOperand::createReg(WAReg);
+ break;
+ }
+ case MachineOperand::MO_Immediate:
+ if (i < Desc.NumOperands) {
+ const MCOperandInfo &Info = Desc.OpInfo[i];
+ if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) {
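+          // The type-index immediate (used by call_indirect) is lowered as a
+          // reference to a temporary symbol carrying the call's signature;
+          // object-file emission later resolves it to an index in the type
+          // section.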
+ MCSymbol *Sym = Printer.createTempSymbol("typeindex");
+ if (!isa<MCSymbolELF>(Sym)) {
+ SmallVector<wasm::ValType, 4> Returns;
+ SmallVector<wasm::ValType, 4> Params;
+
+ const MachineRegisterInfo &MRI =
+ MI->getParent()->getParent()->getRegInfo();
+ for (const MachineOperand &MO : MI->defs())
+ Returns.push_back(getType(MRI.getRegClass(MO.getReg())));
+ for (const MachineOperand &MO : MI->explicit_uses())
+ if (MO.isReg())
+ Params.push_back(getType(MRI.getRegClass(MO.getReg())));
+
+ // call_indirect instructions have a callee operand at the end which
+ // doesn't count as a param.
+ if (WebAssembly::isCallIndirect(*MI))
+ Params.pop_back();
+
+ MCSymbolWasm *WasmSym = cast<MCSymbolWasm>(Sym);
+ WasmSym->setReturns(std::move(Returns));
+ WasmSym->setParams(std::move(Params));
+ WasmSym->setIsFunction(true);
+
+ const MCExpr *Expr =
+ MCSymbolRefExpr::create(WasmSym,
+ MCSymbolRefExpr::VK_WebAssembly_TYPEINDEX,
+ Ctx);
+ MCOp = MCOperand::createExpr(Expr);
+ break;
+ }
+ }
+ }
+ MCOp = MCOperand::createImm(MO.getImm());
+ break;
+ case MachineOperand::MO_FPImmediate: {
+ // TODO: MC converts all floating point immediate operands to double.
+ // This is fine for numeric values, but may cause NaNs to change bits.
+ const ConstantFP *Imm = MO.getFPImm();
+ if (Imm->getType()->isFloatTy())
+ MCOp = MCOperand::createFPImm(Imm->getValueAPF().convertToFloat());
+ else if (Imm->getType()->isDoubleTy())
+ MCOp = MCOperand::createFPImm(Imm->getValueAPF().convertToDouble());
+ else
+ llvm_unreachable("unknown floating point immediate type");
+ break;
+ }
+ case MachineOperand::MO_GlobalAddress:
+ assert(MO.getTargetFlags() == 0 &&
+ "WebAssembly does not use target flags on GlobalAddresses");
+ MCOp = LowerSymbolOperand(GetGlobalAddressSymbol(MO), MO.getOffset(),
+ MO.getGlobal()->getValueType()->isFunctionTy());
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ // The target flag indicates whether this is a symbol for a
+ // variable or a function.
+ assert((MO.getTargetFlags() & -2) == 0 &&
+ "WebAssembly uses only one target flag bit on ExternalSymbols");
+ MCOp = LowerSymbolOperand(GetExternalSymbolSymbol(MO), /*Offset=*/0,
+ MO.getTargetFlags() & 1);
+ break;
+ }
+
+ OutMI.addOperand(MCOp);
+ }
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h
new file mode 100644
index 000000000000..d1d2794c3b8f
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.h
@@ -0,0 +1,46 @@
+//===-- WebAssemblyMCInstLower.h - Lower MachineInstr to MCInst -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares the class to lower WebAssembly MachineInstrs to
+/// their corresponding MCInst records.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMCINSTLOWER_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMCINSTLOWER_H
+
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class WebAssemblyAsmPrinter;
+class MCContext;
+class MCSymbol;
+class MachineInstr;
+class MachineOperand;
+
+/// This class is used to lower a MachineInstr into an MCInst.
+class LLVM_LIBRARY_VISIBILITY WebAssemblyMCInstLower {
+ MCContext &Ctx;
+ WebAssemblyAsmPrinter &Printer;
+
+ MCSymbol *GetGlobalAddressSymbol(const MachineOperand &MO) const;
+ MCSymbol *GetExternalSymbolSymbol(const MachineOperand &MO) const;
+ MCOperand LowerSymbolOperand(MCSymbol *Sym, int64_t Offset,
+ bool IsFunc) const;
+
+public:
+ WebAssemblyMCInstLower(MCContext &ctx, WebAssemblyAsmPrinter &printer)
+ : Ctx(ctx), Printer(printer) {}
+ void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
new file mode 100644
index 000000000000..ccf6a18b32ea
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.cpp
@@ -0,0 +1,62 @@
+//=- WebAssemblyMachineFunctionInfo.cpp - WebAssembly Machine Function Info -=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements WebAssembly-specific per-machine-function
+/// information.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/Analysis.h"
+using namespace llvm;
+
+WebAssemblyFunctionInfo::~WebAssemblyFunctionInfo() {}
+
+void WebAssemblyFunctionInfo::initWARegs() {
+ assert(WARegs.empty());
+ unsigned Reg = UnusedReg;
+ WARegs.resize(MF.getRegInfo().getNumVirtRegs(), Reg);
+}
+
+void llvm::ComputeLegalValueVTs(const Function &F, const TargetMachine &TM,
+ Type *Ty, SmallVectorImpl<MVT> &ValueVTs) {
+ const DataLayout &DL(F.getParent()->getDataLayout());
+ const WebAssemblyTargetLowering &TLI =
+ *TM.getSubtarget<WebAssemblySubtarget>(F).getTargetLowering();
+ SmallVector<EVT, 4> VTs;
+ ComputeValueVTs(TLI, DL, Ty, VTs);
+
+ for (EVT VT : VTs) {
+ unsigned NumRegs = TLI.getNumRegisters(F.getContext(), VT);
+ MVT RegisterVT = TLI.getRegisterType(F.getContext(), VT);
+ for (unsigned i = 0; i != NumRegs; ++i)
+ ValueVTs.push_back(RegisterVT);
+ }
+}
+
+void llvm::ComputeSignatureVTs(const Function &F, const TargetMachine &TM,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results) {
+ ComputeLegalValueVTs(F, TM, F.getReturnType(), Results);
+
+ if (Results.size() > 1) {
+ // WebAssembly currently can't lower returns of multiple values without
+ // demoting to sret (see WebAssemblyTargetLowering::CanLowerReturn). So
+ // replace multiple return values with a pointer parameter.
+ Results.clear();
+ Params.push_back(
+ MVT::getIntegerVT(TM.createDataLayout().getPointerSizeInBits()));
+ }
+
+ for (auto &Arg : F.args())
+ ComputeLegalValueVTs(F, TM, Arg.getType(), Params);
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
new file mode 100644
index 000000000000..1fcbb7791d4e
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyMachineFunctionInfo.h
@@ -0,0 +1,121 @@
+// WebAssemblyMachineFunctionInfo.h-WebAssembly machine function info-*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares WebAssembly-specific per-machine-function
+/// information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYMACHINEFUNCTIONINFO_H
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+namespace llvm {
+
+/// This class is derived from MachineFunctionInfo and contains private
+/// WebAssembly-specific information for each MachineFunction.
+class WebAssemblyFunctionInfo final : public MachineFunctionInfo {
+ MachineFunction &MF;
+
+ std::vector<MVT> Params;
+ std::vector<MVT> Results;
+ std::vector<MVT> Locals;
+
+ /// A mapping from CodeGen vreg index to WebAssembly register number.
+ std::vector<unsigned> WARegs;
+
+ /// A mapping from CodeGen vreg index to a boolean value indicating whether
+ /// the given register is considered to be "stackified", meaning it has been
+ /// determined or made to meet the stack requirements:
+ /// - single use (per path)
+ /// - single def (per path)
+ /// - defined and used in LIFO order with other stack registers
+ BitVector VRegStackified;
+
+  // A virtual register holding the pointer to the vararg buffer for vararg
+  // functions. It is created and set in TLI::LowerFormalArguments and read by
+  // TLI::LowerVASTART.
+ unsigned VarargVreg = -1U;
+
+ // A virtual register holding the base pointer for functions that have
+ // overaligned values on the user stack.
+ unsigned BasePtrVreg = -1U;
+
+ public:
+ explicit WebAssemblyFunctionInfo(MachineFunction &MF) : MF(MF) {}
+ ~WebAssemblyFunctionInfo() override;
+
+ void addParam(MVT VT) { Params.push_back(VT); }
+ const std::vector<MVT> &getParams() const { return Params; }
+
+ void addResult(MVT VT) { Results.push_back(VT); }
+ const std::vector<MVT> &getResults() const { return Results; }
+
+ void setNumLocals(size_t NumLocals) { Locals.resize(NumLocals, MVT::i32); }
+ void setLocal(size_t i, MVT VT) { Locals[i] = VT; }
+ void addLocal(MVT VT) { Locals.push_back(VT); }
+ const std::vector<MVT> &getLocals() const { return Locals; }
+
+ unsigned getVarargBufferVreg() const {
+ assert(VarargVreg != -1U && "Vararg vreg hasn't been set");
+ return VarargVreg;
+ }
+ void setVarargBufferVreg(unsigned Reg) { VarargVreg = Reg; }
+
+ unsigned getBasePointerVreg() const {
+ assert(BasePtrVreg != -1U && "Base ptr vreg hasn't been set");
+ return BasePtrVreg;
+ }
+ void setBasePointerVreg(unsigned Reg) { BasePtrVreg = Reg; }
+
+ static const unsigned UnusedReg = -1u;
+
+ void stackifyVReg(unsigned VReg) {
+ assert(MF.getRegInfo().getUniqueVRegDef(VReg));
+ if (TargetRegisterInfo::virtReg2Index(VReg) >= VRegStackified.size())
+ VRegStackified.resize(TargetRegisterInfo::virtReg2Index(VReg) + 1);
+ VRegStackified.set(TargetRegisterInfo::virtReg2Index(VReg));
+ }
+ bool isVRegStackified(unsigned VReg) const {
+ if (TargetRegisterInfo::virtReg2Index(VReg) >= VRegStackified.size())
+ return false;
+ return VRegStackified.test(TargetRegisterInfo::virtReg2Index(VReg));
+ }
+
+ void initWARegs();
+ void setWAReg(unsigned VReg, unsigned WAReg) {
+ assert(WAReg != UnusedReg);
+ assert(TargetRegisterInfo::virtReg2Index(VReg) < WARegs.size());
+ WARegs[TargetRegisterInfo::virtReg2Index(VReg)] = WAReg;
+ }
+ unsigned getWAReg(unsigned Reg) const {
+ assert(TargetRegisterInfo::virtReg2Index(Reg) < WARegs.size());
+ return WARegs[TargetRegisterInfo::virtReg2Index(Reg)];
+ }
+
+ // For a given stackified WAReg, return the id number to print with push/pop.
+ static unsigned getWARegStackId(unsigned Reg) {
+ assert(Reg & INT32_MIN);
+ return Reg & INT32_MAX;
+ }
+};
+
+void ComputeLegalValueVTs(const Function &F, const TargetMachine &TM,
+ Type *Ty, SmallVectorImpl<MVT> &ValueVTs);
+
+void ComputeSignatureVTs(const Function &F, const TargetMachine &TM,
+ SmallVectorImpl<MVT> &Params,
+ SmallVectorImpl<MVT> &Results);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
new file mode 100644
index 000000000000..ebe97848d461
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeLiveIntervals.cpp
@@ -0,0 +1,105 @@
+//===--- WebAssemblyOptimizeLiveIntervals.cpp - LiveInterval processing ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Optimize LiveIntervals for use in a post-RA context.
+///
+/// LiveIntervals normally runs before register allocation when the code is
+/// only recently lowered out of SSA form, so it's uncommon for registers to
+/// have multiple defs, and when they do, the defs are usually closely
+/// related.
+/// Later, after coalescing, tail duplication, and other optimizations, it's
+/// more common to see registers with multiple unrelated defs. This pass
+/// updates LiveIntervals to distribute the value numbers across separate
+/// LiveIntervals.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-optimize-live-intervals"
+
+namespace {
+class WebAssemblyOptimizeLiveIntervals final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Optimize Live Intervals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreservedID(LiveVariablesID);
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyOptimizeLiveIntervals() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyOptimizeLiveIntervals::ID = 0;
+FunctionPass *llvm::createWebAssemblyOptimizeLiveIntervals() {
+ return new WebAssemblyOptimizeLiveIntervals();
+}
+
+bool WebAssemblyOptimizeLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Optimize LiveIntervals **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ assert(MRI.tracksLiveness() &&
+ "OptimizeLiveIntervals expects liveness");
+
+ // Split multiple-VN LiveIntervals into multiple LiveIntervals.
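+  // If a register's live range consists of several disconnected components
+  // (e.g. unrelated defs reaching disjoint sets of uses), split each
+  // component into its own register so later passes can treat them
+  // independently.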
+ SmallVector<LiveInterval*, 4> SplitLIs;
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+ if (MRI.reg_nodbg_empty(Reg))
+ continue;
+
+ LIS.splitSeparateComponents(LIS.getInterval(Reg), SplitLIs);
+ SplitLIs.clear();
+ }
+
+ // In PrepareForLiveIntervals, we conservatively inserted IMPLICIT_DEF
+ // instructions to satisfy LiveIntervals' requirement that all uses be
+ // dominated by defs. Now that LiveIntervals has computed which of these
+ // defs are actually needed and which are dead, remove the dead ones.
+ for (auto MII = MF.begin()->begin(), MIE = MF.begin()->end(); MII != MIE; ) {
+ MachineInstr *MI = &*MII++;
+ if (MI->isImplicitDef() && MI->getOperand(0).isDead()) {
+ LiveInterval &LI = LIS.getInterval(MI->getOperand(0).getReg());
+ LIS.removeVRegDefAt(LI, LIS.getInstructionIndex(*MI).getRegSlot());
+ LIS.RemoveMachineInstrFromMaps(*MI);
+ MI->eraseFromParent();
+ }
+ }
+
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
new file mode 100644
index 000000000000..559165e4c86b
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyOptimizeReturned.cpp
@@ -0,0 +1,76 @@
+//===-- WebAssemblyOptimizeReturned.cpp - Optimize "returned" attributes --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Optimize calls with "returned" attributes for WebAssembly.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-optimize-returned"
+
+namespace {
+class OptimizeReturned final : public FunctionPass,
+ public InstVisitor<OptimizeReturned> {
+ StringRef getPassName() const override {
+ return "WebAssembly Optimize Returned";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnFunction(Function &F) override;
+
+ DominatorTree *DT;
+
+public:
+ static char ID;
+ OptimizeReturned() : FunctionPass(ID), DT(nullptr) {}
+
+ void visitCallSite(CallSite CS);
+};
+} // End anonymous namespace
+
+char OptimizeReturned::ID = 0;
+FunctionPass *llvm::createWebAssemblyOptimizeReturned() {
+ return new OptimizeReturned();
+}
+
+void OptimizeReturned::visitCallSite(CallSite CS) {
+ for (unsigned i = 0, e = CS.getNumArgOperands(); i < e; ++i)
+ if (CS.paramHasAttr(i, Attribute::Returned)) {
+ Instruction *Inst = CS.getInstruction();
+ Value *Arg = CS.getArgOperand(i);
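+      // The 'returned' attribute guarantees the call returns Arg, so any use
+      // of Arg dominated by the call can be rewritten to use the call's
+      // result instead.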
+ // Ignore constants, globals, undef, etc.
+ if (isa<Constant>(Arg))
+ continue;
+ // Like replaceDominatedUsesWith but using Instruction/Use dominance.
+ for (auto UI = Arg->use_begin(), UE = Arg->use_end(); UI != UE;) {
+ Use &U = *UI++;
+ if (DT->dominates(Inst, U))
+ U.set(Inst);
+ }
+ }
+}
+
+bool OptimizeReturned::runOnFunction(Function &F) {
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+ visit(F);
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
new file mode 100644
index 000000000000..d2fbc5a22308
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPeephole.cpp
@@ -0,0 +1,210 @@
+//===-- WebAssemblyPeephole.cpp - WebAssembly Peephole Optimizations ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Late peephole optimizations for WebAssembly.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-peephole"
+
+static cl::opt<bool> DisableWebAssemblyFallthroughReturnOpt(
+ "disable-wasm-fallthrough-return-opt", cl::Hidden,
+ cl::desc("WebAssembly: Disable fallthrough-return optimizations."),
+ cl::init(false));
+
+namespace {
+class WebAssemblyPeephole final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly late peephole optimizer";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID;
+ WebAssemblyPeephole() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyPeephole::ID = 0;
+FunctionPass *llvm::createWebAssemblyPeephole() {
+ return new WebAssemblyPeephole();
+}
+
+/// If desirable, rewrite NewReg to a drop register.
+static bool MaybeRewriteToDrop(unsigned OldReg, unsigned NewReg,
+ MachineOperand &MO, WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI) {
+ bool Changed = false;
+ if (OldReg == NewReg) {
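+    // These libcalls return their first argument, so if the result register
+    // is the same register the argument is in, the result is redundant;
+    // retarget the def to a fresh register that is marked dead and
+    // stackified, so that it lowers to a 'drop'.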
+ Changed = true;
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(OldReg));
+ MO.setReg(NewReg);
+ MO.setIsDead();
+ MFI.stackifyVReg(NewReg);
+ }
+ return Changed;
+}
+
+static bool MaybeRewriteToFallthrough(MachineInstr &MI, MachineBasicBlock &MBB,
+ const MachineFunction &MF,
+ WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI,
+ const WebAssemblyInstrInfo &TII,
+ unsigned FallthroughOpc,
+ unsigned CopyLocalOpc) {
+ if (DisableWebAssemblyFallthroughReturnOpt)
+ return false;
+ if (&MBB != &MF.back())
+ return false;
+ if (MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple().isOSBinFormatELF()) {
+ if (&MI != &MBB.back())
+ return false;
+ } else {
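+    // In the wasm object format, the function body ends with an END_FUNCTION
+    // marker, so the return must be the instruction immediately before it.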
+ MachineBasicBlock::iterator End = MBB.end();
+ --End;
+ assert(End->getOpcode() == WebAssembly::END_FUNCTION);
+ --End;
+ if (&MI != &*End)
+ return false;
+ }
+
+ if (FallthroughOpc != WebAssembly::FALLTHROUGH_RETURN_VOID) {
+ // If the operand isn't stackified, insert a COPY to read the operand and
+ // stackify it.
+ MachineOperand &MO = MI.getOperand(0);
+ unsigned Reg = MO.getReg();
+ if (!MFI.isVRegStackified(Reg)) {
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(CopyLocalOpc), NewReg)
+ .addReg(Reg);
+ MO.setReg(NewReg);
+ MFI.stackifyVReg(NewReg);
+ }
+ }
+
+ // Rewrite the return.
+ MI.setDesc(TII.get(FallthroughOpc));
+ return true;
+}
+
+bool WebAssemblyPeephole::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Peephole **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ bool Changed = false;
+
+ for (auto &MBB : MF)
+ for (auto &MI : MBB)
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case WebAssembly::CALL_I32:
+ case WebAssembly::CALL_I64: {
+ MachineOperand &Op1 = MI.getOperand(1);
+ if (Op1.isSymbol()) {
+ StringRef Name(Op1.getSymbolName());
+ if (Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+ Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+ Name == TLI.getLibcallName(RTLIB::MEMSET)) {
+ LibFunc Func;
+ if (LibInfo.getLibFunc(Name, Func)) {
+ const auto &Op2 = MI.getOperand(2);
+ if (!Op2.isReg())
+ report_fatal_error("Peephole: call to builtin function with "
+ "wrong signature, not consuming reg");
+ MachineOperand &MO = MI.getOperand(0);
+ unsigned OldReg = MO.getReg();
+ unsigned NewReg = Op2.getReg();
+
+ if (MRI.getRegClass(NewReg) != MRI.getRegClass(OldReg))
+ report_fatal_error("Peephole: call to builtin function with "
+ "wrong signature, from/to mismatch");
+ Changed |= MaybeRewriteToDrop(OldReg, NewReg, MO, MFI, MRI);
+ }
+ }
+ }
+ break;
+ }
+      // Optimize away an explicit return at the end of the function.
+ case WebAssembly::RETURN_I32:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I32,
+ WebAssembly::COPY_I32);
+ break;
+ case WebAssembly::RETURN_I64:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_I64,
+ WebAssembly::COPY_I64);
+ break;
+ case WebAssembly::RETURN_F32:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F32,
+ WebAssembly::COPY_F32);
+ break;
+ case WebAssembly::RETURN_F64:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_F64,
+ WebAssembly::COPY_F64);
+ break;
+ case WebAssembly::RETURN_v16i8:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v16i8,
+ WebAssembly::COPY_V128);
+ break;
+ case WebAssembly::RETURN_v8i16:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v8i16,
+ WebAssembly::COPY_V128);
+ break;
+ case WebAssembly::RETURN_v4i32:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4i32,
+ WebAssembly::COPY_V128);
+ break;
+ case WebAssembly::RETURN_v4f32:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_v4f32,
+ WebAssembly::COPY_V128);
+ break;
+ case WebAssembly::RETURN_VOID:
+ Changed |= MaybeRewriteToFallthrough(
+ MI, MBB, MF, MFI, MRI, TII, WebAssembly::FALLTHROUGH_RETURN_VOID,
+ WebAssembly::INSTRUCTION_LIST_END);
+ break;
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
new file mode 100644
index 000000000000..3a2876bfcde2
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyPrepareForLiveIntervals.cpp
@@ -0,0 +1,124 @@
+//===- WebAssemblyPrepareForLiveIntervals.cpp - Prepare for LiveIntervals -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Fix up code to meet LiveIntervals' requirements.
+///
+/// Some CodeGen passes don't preserve LiveIntervals' requirements, because
+/// they run after register allocation, where it normally isn't important.
+/// WebAssembly runs LiveIntervals in a late pass. This pass transforms code
+/// to meet LiveIntervals' requirements; primarily, it ensures that all
+/// virtual register uses have definitions (IMPLICIT_DEF definitions if
+/// nothing else).
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-prepare-for-live-intervals"
+
+namespace {
+class WebAssemblyPrepareForLiveIntervals final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyPrepareForLiveIntervals() : MachineFunctionPass(ID) {}
+
+private:
+ StringRef getPassName() const override {
+ return "WebAssembly Prepare For LiveIntervals";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyPrepareForLiveIntervals::ID = 0;
+FunctionPass *llvm::createWebAssemblyPrepareForLiveIntervals() {
+ return new WebAssemblyPrepareForLiveIntervals();
+}
+
+// Test whether the given register has an ARGUMENT def.
+static bool HasArgumentDef(unsigned Reg, const MachineRegisterInfo &MRI) {
+ for (const auto &Def : MRI.def_instructions(Reg))
+ if (WebAssembly::isArgument(Def))
+ return true;
+ return false;
+}
+
+bool WebAssemblyPrepareForLiveIntervals::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Prepare For LiveIntervals **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const auto &TII = *MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ MachineBasicBlock &Entry = *MF.begin();
+
+ assert(!mustPreserveAnalysisID(LiveIntervalsID) &&
+ "LiveIntervals shouldn't be active yet!");
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ // BranchFolding and perhaps other passes don't preserve IMPLICIT_DEF
+ // instructions. LiveIntervals requires that all paths to virtual register
+ // uses provide a definition. Insert IMPLICIT_DEFs in the entry block to
+ // conservatively satisfy this.
+ //
+ // TODO: This is fairly heavy-handed; find a better approach.
+ //
+ for (unsigned i = 0, e = MRI.getNumVirtRegs(); i < e; ++i) {
+ unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
+
+ // Skip unused registers.
+ if (MRI.use_nodbg_empty(Reg))
+ continue;
+
+ // Skip registers that have an ARGUMENT definition.
+ if (HasArgumentDef(Reg, MRI))
+ continue;
+
+ BuildMI(Entry, Entry.begin(), DebugLoc(),
+ TII.get(WebAssembly::IMPLICIT_DEF), Reg);
+ Changed = true;
+ }
+
+ // Move ARGUMENT_* instructions to the top of the entry block, so that their
+ // liveness reflects the fact that these really are live-in values.
+ for (auto MII = Entry.begin(), MIE = Entry.end(); MII != MIE; ) {
+ MachineInstr &MI = *MII++;
+ if (WebAssembly::isArgument(MI)) {
+ MI.removeFromParent();
+ Entry.insert(Entry.begin(), &MI);
+ }
+ }
+
+ // Ok, we're now ready to run the LiveIntervals analysis again.
+ MF.getProperties().set(MachineFunctionProperties::Property::TracksLiveness);
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
new file mode 100644
index 000000000000..2ac3a839c3c8
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp
@@ -0,0 +1,174 @@
+//===-- WebAssemblyRegColoring.cpp - Register coloring --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a virtual register coloring pass.
+///
+/// WebAssembly doesn't have a fixed number of registers, but it is still
+/// desirable to minimize the total number of registers used in each function.
+///
+/// This code is modeled after lib/CodeGen/StackSlotColoring.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-coloring"
+
+namespace {
+class WebAssemblyRegColoring final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyRegColoring() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Register Coloring";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveIntervals>();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+};
+} // end anonymous namespace
+
+char WebAssemblyRegColoring::ID = 0;
+FunctionPass *llvm::createWebAssemblyRegColoring() {
+ return new WebAssemblyRegColoring();
+}
+
+// Compute the total spill weight for VReg.
+static float computeWeight(const MachineRegisterInfo *MRI,
+ const MachineBlockFrequencyInfo *MBFI,
+ unsigned VReg) {
+ float weight = 0.0f;
+ for (MachineOperand &MO : MRI->reg_nodbg_operands(VReg))
+ weight += LiveIntervals::getSpillWeight(MO.isDef(), MO.isUse(), MBFI,
+ *MO.getParent());
+ return weight;
+}
+
+bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Register Coloring **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ // If there are calls to setjmp or sigsetjmp, don't perform coloring. Virtual
+ // registers could be modified before the longjmp is executed, resulting in
+ // the wrong value being used afterwards. (See <rdar://problem/8007500>.)
+ // TODO: Does WebAssembly need to care about setjmp for register coloring?
+ if (MF.exposesReturnsTwice())
+ return false;
+
+ MachineRegisterInfo *MRI = &MF.getRegInfo();
+ LiveIntervals *Liveness = &getAnalysis<LiveIntervals>();
+ const MachineBlockFrequencyInfo *MBFI =
+ &getAnalysis<MachineBlockFrequencyInfo>();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+
+ // Gather all register intervals into a list and sort them.
+ unsigned NumVRegs = MRI->getNumVirtRegs();
+ SmallVector<LiveInterval *, 0> SortedIntervals;
+ SortedIntervals.reserve(NumVRegs);
+
+ DEBUG(dbgs() << "Interesting register intervals:\n");
+ for (unsigned i = 0; i < NumVRegs; ++i) {
+ unsigned VReg = TargetRegisterInfo::index2VirtReg(i);
+ if (MFI.isVRegStackified(VReg))
+ continue;
+ // Skip unused registers, which can use $drop.
+ if (MRI->use_empty(VReg))
+ continue;
+
+ LiveInterval *LI = &Liveness->getInterval(VReg);
+ assert(LI->weight == 0.0f);
+ LI->weight = computeWeight(MRI, MBFI, VReg);
+ DEBUG(LI->dump());
+ SortedIntervals.push_back(LI);
+ }
+ DEBUG(dbgs() << '\n');
+
+ // Sort them to put arguments first (since we don't want to rename live-in
+ // registers), by weight next, and then by position.
+ // TODO: Investigate more intelligent sorting heuristics. For starters, we
+ // should try to coalesce adjacent live intervals before non-adjacent ones.
+ std::sort(SortedIntervals.begin(), SortedIntervals.end(),
+ [MRI](LiveInterval *LHS, LiveInterval *RHS) {
+ if (MRI->isLiveIn(LHS->reg) != MRI->isLiveIn(RHS->reg))
+ return MRI->isLiveIn(LHS->reg);
+ if (LHS->weight != RHS->weight)
+ return LHS->weight > RHS->weight;
+ if (LHS->empty() || RHS->empty())
+ return !LHS->empty() && RHS->empty();
+ return *LHS < *RHS;
+ });
+
+ DEBUG(dbgs() << "Coloring register intervals:\n");
+ SmallVector<unsigned, 16> SlotMapping(SortedIntervals.size(), -1u);
+ SmallVector<SmallVector<LiveInterval *, 4>, 16> Assignments(
+ SortedIntervals.size());
+ BitVector UsedColors(SortedIntervals.size());
+ bool Changed = false;
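+  // Greedily color the intervals: visit them in sorted order and reuse the
+  // first already-used color whose register class matches and whose assigned
+  // intervals don't overlap; otherwise the interval keeps its own index as a
+  // new color.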
+ for (size_t i = 0, e = SortedIntervals.size(); i < e; ++i) {
+ LiveInterval *LI = SortedIntervals[i];
+ unsigned Old = LI->reg;
+ size_t Color = i;
+ const TargetRegisterClass *RC = MRI->getRegClass(Old);
+
+ // Check if it's possible to reuse any of the used colors.
+ if (!MRI->isLiveIn(Old))
+ for (unsigned C : UsedColors.set_bits()) {
+ if (MRI->getRegClass(SortedIntervals[C]->reg) != RC)
+ continue;
+ for (LiveInterval *OtherLI : Assignments[C])
+ if (!OtherLI->empty() && OtherLI->overlaps(*LI))
+ goto continue_outer;
+ Color = C;
+ break;
+ continue_outer:;
+ }
+
+ unsigned New = SortedIntervals[Color]->reg;
+ SlotMapping[i] = New;
+ Changed |= Old != New;
+ UsedColors.set(Color);
+ Assignments[Color].push_back(LI);
+ DEBUG(dbgs() << "Assigning vreg"
+ << TargetRegisterInfo::virtReg2Index(LI->reg) << " to vreg"
+ << TargetRegisterInfo::virtReg2Index(New) << "\n");
+ }
+ if (!Changed)
+ return false;
+
+ // Rewrite register operands.
+ for (size_t i = 0, e = SortedIntervals.size(); i < e; ++i) {
+ unsigned Old = SortedIntervals[i]->reg;
+ unsigned New = SlotMapping[i];
+ if (Old != New)
+ MRI->replaceRegWith(Old, New);
+ }
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
new file mode 100644
index 000000000000..766ab456a8e6
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegNumbering.cpp
@@ -0,0 +1,107 @@
+//===-- WebAssemblyRegNumbering.cpp - Register Numbering ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a pass which assigns WebAssembly register
+/// numbers for CodeGen virtual registers.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-numbering"
+
+namespace {
+class WebAssemblyRegNumbering final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Register Numbering";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyRegNumbering() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyRegNumbering::ID = 0;
+FunctionPass *llvm::createWebAssemblyRegNumbering() {
+ return new WebAssemblyRegNumbering();
+}
+
+bool WebAssemblyRegNumbering::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Register Numbering **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ MFI.initWARegs();
+
+ // WebAssembly argument registers are in the same index space as local
+ // variables. Assign the numbers for them first.
+ MachineBasicBlock &EntryMBB = MF.front();
+ for (MachineInstr &MI : EntryMBB) {
+ if (!WebAssembly::isArgument(MI))
+ break;
+
+ int64_t Imm = MI.getOperand(1).getImm();
+ DEBUG(dbgs() << "Arg VReg " << MI.getOperand(0).getReg() << " -> WAReg "
+ << Imm << "\n");
+ MFI.setWAReg(MI.getOperand(0).getReg(), Imm);
+ }
+
+ // Then assign regular WebAssembly registers for all remaining used
+ // virtual registers. TODO: Consider sorting the registers by frequency of
+ // use, to maximize usage of small immediate fields.
+ unsigned NumVRegs = MF.getRegInfo().getNumVirtRegs();
+ unsigned NumStackRegs = 0;
+ // Start the numbering for locals after the arg regs
+ unsigned CurReg = MFI.getParams().size();
+ for (unsigned VRegIdx = 0; VRegIdx < NumVRegs; ++VRegIdx) {
+ unsigned VReg = TargetRegisterInfo::index2VirtReg(VRegIdx);
+ // Skip unused registers.
+ if (MRI.use_empty(VReg))
+ continue;
+ // Handle stackified registers.
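+    // A stackified register is tagged by setting the high bit of its number;
+    // the low bits hold a sequential id (see
+    // WebAssemblyFunctionInfo::getWARegStackId).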
+ if (MFI.isVRegStackified(VReg)) {
+ DEBUG(dbgs() << "VReg " << VReg << " -> WAReg "
+ << (INT32_MIN | NumStackRegs) << "\n");
+ MFI.setWAReg(VReg, INT32_MIN | NumStackRegs++);
+ continue;
+ }
+ if (MFI.getWAReg(VReg) == WebAssemblyFunctionInfo::UnusedReg) {
+ DEBUG(dbgs() << "VReg " << VReg << " -> WAReg " << CurReg << "\n");
+ MFI.setWAReg(VReg, CurReg++);
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
new file mode 100644
index 000000000000..a4bb967f36f6
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -0,0 +1,894 @@
+//===-- WebAssemblyRegStackify.cpp - Register Stackification --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a register stacking pass.
+///
+/// This pass reorders instructions to put register uses and defs in an order
+/// such that they form single-use expression trees. Registers fitting this form
+/// are then marked as "stackified", meaning references to them are replaced by
+/// "push" and "pop" from the value stack.
+///
+/// This is primarily a code size optimization, since temporary values on the
+/// value stack don't need to be named.
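+///
+/// For example (illustrative, in the AsmPrinter's push/pop notation), instead
+/// of
+///   i32.add $1=, $0, $0
+///   i32.add $2=, $1, $0
+/// stackification keeps the intermediate value on the value stack:
+///   i32.add $push0=, $0, $0
+///   i32.add $2=, $pop0, $0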
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h" // for WebAssembly::ARGUMENT_*
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "WebAssemblyUtilities.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-stackify"
+
+namespace {
+class WebAssemblyRegStackify final : public MachineFunctionPass {
+ StringRef getPassName() const override {
+ return "WebAssembly Register Stackify";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addPreservedID(LiveVariablesID);
+ AU.addPreserved<MachineDominatorTree>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyRegStackify() : MachineFunctionPass(ID) {}
+};
+} // end anonymous namespace
+
+char WebAssemblyRegStackify::ID = 0;
+FunctionPass *llvm::createWebAssemblyRegStackify() {
+ return new WebAssemblyRegStackify();
+}
+
+// Decorate the given instruction with implicit operands that enforce the
+// expression stack ordering constraints for an instruction which is on
+// the expression stack.
+static void ImposeStackOrdering(MachineInstr *MI) {
+ // Write the opaque VALUE_STACK register.
+ if (!MI->definesRegister(WebAssembly::VALUE_STACK))
+ MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
+ /*isDef=*/true,
+ /*isImp=*/true));
+
+ // Also read the opaque VALUE_STACK register.
+ if (!MI->readsRegister(WebAssembly::VALUE_STACK))
+ MI->addOperand(MachineOperand::CreateReg(WebAssembly::VALUE_STACK,
+ /*isDef=*/false,
+ /*isImp=*/true));
+}
+
+// Convert an IMPLICIT_DEF instruction into an instruction which defines
+// a constant zero value.
+static void ConvertImplicitDefToConstZero(MachineInstr *MI,
+ MachineRegisterInfo &MRI,
+ const TargetInstrInfo *TII,
+ MachineFunction &MF) {
+ assert(MI->getOpcode() == TargetOpcode::IMPLICIT_DEF);
+
+ const auto *RegClass =
+ MRI.getRegClass(MI->getOperand(0).getReg());
+ if (RegClass == &WebAssembly::I32RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_I32));
+ MI->addOperand(MachineOperand::CreateImm(0));
+ } else if (RegClass == &WebAssembly::I64RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_I64));
+ MI->addOperand(MachineOperand::CreateImm(0));
+ } else if (RegClass == &WebAssembly::F32RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_F32));
+ ConstantFP *Val = cast<ConstantFP>(Constant::getNullValue(
+ Type::getFloatTy(MF.getFunction().getContext())));
+ MI->addOperand(MachineOperand::CreateFPImm(Val));
+ } else if (RegClass == &WebAssembly::F64RegClass) {
+ MI->setDesc(TII->get(WebAssembly::CONST_F64));
+ ConstantFP *Val = cast<ConstantFP>(Constant::getNullValue(
+ Type::getDoubleTy(MF.getFunction().getContext())));
+ MI->addOperand(MachineOperand::CreateFPImm(Val));
+ } else {
+ llvm_unreachable("Unexpected reg class");
+ }
+}
+
+// Determine whether a call to the callee referenced by
+// MI->getOperand(CalleeOpNo) reads memory, writes memory, and/or has side
+// effects.
+static void QueryCallee(const MachineInstr &MI, unsigned CalleeOpNo, bool &Read,
+ bool &Write, bool &Effects, bool &StackPointer) {
+ // All calls can use the stack pointer.
+ StackPointer = true;
+
+ const MachineOperand &MO = MI.getOperand(CalleeOpNo);
+ if (MO.isGlobal()) {
+ const Constant *GV = MO.getGlobal();
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
+ if (!GA->isInterposable())
+ GV = GA->getAliasee();
+
+ if (const Function *F = dyn_cast<Function>(GV)) {
+ if (!F->doesNotThrow())
+ Effects = true;
+ if (F->doesNotAccessMemory())
+ return;
+ if (F->onlyReadsMemory()) {
+ Read = true;
+ return;
+ }
+ }
+ }
+
+ // Assume the worst.
+ Write = true;
+ Read = true;
+ Effects = true;
+}
+
+// Determine whether MI reads memory, writes memory, has side effects,
+// and/or uses the stack pointer value.
+static void Query(const MachineInstr &MI, AliasAnalysis &AA, bool &Read,
+ bool &Write, bool &Effects, bool &StackPointer) {
+ assert(!MI.isPosition());
+ assert(!MI.isTerminator());
+
+ if (MI.isDebugValue())
+ return;
+
+ // Check for loads.
+ if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(&AA))
+ Read = true;
+
+ // Check for stores.
+ if (MI.mayStore()) {
+ Write = true;
+
+ // Check for stores to __stack_pointer.
+ for (auto MMO : MI.memoperands()) {
+ const MachinePointerInfo &MPI = MMO->getPointerInfo();
+ if (MPI.V.is<const PseudoSourceValue *>()) {
+ auto PSV = MPI.V.get<const PseudoSourceValue *>();
+ if (const ExternalSymbolPseudoSourceValue *EPSV =
+ dyn_cast<ExternalSymbolPseudoSourceValue>(PSV))
+ if (StringRef(EPSV->getSymbol()) == "__stack_pointer") {
+ StackPointer = true;
+ }
+ }
+ }
+ } else if (MI.hasOrderedMemoryRef()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::DIV_S_I32: case WebAssembly::DIV_S_I64:
+ case WebAssembly::REM_S_I32: case WebAssembly::REM_S_I64:
+ case WebAssembly::DIV_U_I32: case WebAssembly::DIV_U_I64:
+ case WebAssembly::REM_U_I32: case WebAssembly::REM_U_I64:
+ case WebAssembly::I32_TRUNC_S_F32: case WebAssembly::I64_TRUNC_S_F32:
+ case WebAssembly::I32_TRUNC_S_F64: case WebAssembly::I64_TRUNC_S_F64:
+ case WebAssembly::I32_TRUNC_U_F32: case WebAssembly::I64_TRUNC_U_F32:
+ case WebAssembly::I32_TRUNC_U_F64: case WebAssembly::I64_TRUNC_U_F64:
+      // These instructions have hasUnmodeledSideEffects() returning true
+      // because they trap on overflow and invalid, so they can't be
+      // arbitrarily moved; however, hasOrderedMemoryRef() interprets this
+      // plus their lack of memoperands as a potential unknown memory
+      // reference.
+ break;
+ default:
+ // Record volatile accesses, unless it's a call, as calls are handled
+ // specially below.
+ if (!MI.isCall()) {
+ Write = true;
+ Effects = true;
+ }
+ break;
+ }
+ }
+
+ // Check for side effects.
+ if (MI.hasUnmodeledSideEffects()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::DIV_S_I32: case WebAssembly::DIV_S_I64:
+ case WebAssembly::REM_S_I32: case WebAssembly::REM_S_I64:
+ case WebAssembly::DIV_U_I32: case WebAssembly::DIV_U_I64:
+ case WebAssembly::REM_U_I32: case WebAssembly::REM_U_I64:
+ case WebAssembly::I32_TRUNC_S_F32: case WebAssembly::I64_TRUNC_S_F32:
+ case WebAssembly::I32_TRUNC_S_F64: case WebAssembly::I64_TRUNC_S_F64:
+ case WebAssembly::I32_TRUNC_U_F32: case WebAssembly::I64_TRUNC_U_F32:
+ case WebAssembly::I32_TRUNC_U_F64: case WebAssembly::I64_TRUNC_U_F64:
+      // These instructions have hasUnmodeledSideEffects() returning true
+      // because they trap on overflow and invalid, so they can't be
+      // arbitrarily moved; however, in the specific case of register
+      // stackifying, it is safe to move them because overflow and invalid
+      // are Undefined Behavior.
+ break;
+ default:
+ Effects = true;
+ break;
+ }
+ }
+
+ // Analyze calls.
+ if (MI.isCall()) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CALL_VOID:
+ case WebAssembly::CALL_INDIRECT_VOID:
+ QueryCallee(MI, 0, Read, Write, Effects, StackPointer);
+ break;
+ case WebAssembly::CALL_I32: case WebAssembly::CALL_I64:
+ case WebAssembly::CALL_F32: case WebAssembly::CALL_F64:
+ case WebAssembly::CALL_INDIRECT_I32: case WebAssembly::CALL_INDIRECT_I64:
+ case WebAssembly::CALL_INDIRECT_F32: case WebAssembly::CALL_INDIRECT_F64:
+ QueryCallee(MI, 1, Read, Write, Effects, StackPointer);
+ break;
+ default:
+ llvm_unreachable("unexpected call opcode");
+ }
+ }
+}
+
+// Test whether Def is safe and profitable to rematerialize.
+static bool ShouldRematerialize(const MachineInstr &Def, AliasAnalysis &AA,
+ const WebAssemblyInstrInfo *TII) {
+ return Def.isAsCheapAsAMove() && TII->isTriviallyReMaterializable(Def, &AA);
+}
+
+// Identify the definition for this register at this point. This is a
+// generalization of MachineRegisterInfo::getUniqueVRegDef that uses
+// LiveIntervals to handle complex cases.
+static MachineInstr *GetVRegDef(unsigned Reg, const MachineInstr *Insert,
+ const MachineRegisterInfo &MRI,
+                                const LiveIntervals &LIS) {
+ // Most registers are in SSA form here so we try a quick MRI query first.
+ if (MachineInstr *Def = MRI.getUniqueVRegDef(Reg))
+ return Def;
+
+ // MRI doesn't know what the Def is. Try asking LIS.
+ if (const VNInfo *ValNo = LIS.getInterval(Reg).getVNInfoBefore(
+ LIS.getInstructionIndex(*Insert)))
+ return LIS.getInstructionFromIndex(ValNo->def);
+
+ return nullptr;
+}
+
+// Test whether Reg, as defined at Def, has exactly one use. This is a
+// generalization of MachineRegisterInfo::hasOneUse that uses LiveIntervals
+// to handle complex cases.
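+//
+// For example, after a tee has been introduced, Reg may have multiple defs
+// and uses; this returns true only if exactly one use reads the value
+// defined at Def, and that use kills it.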
+static bool HasOneUse(unsigned Reg, MachineInstr *Def,
+ MachineRegisterInfo &MRI, MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ // Most registers are in SSA form here so we try a quick MRI query first.
+ if (MRI.hasOneUse(Reg))
+ return true;
+
+ bool HasOne = false;
+ const LiveInterval &LI = LIS.getInterval(Reg);
+ const VNInfo *DefVNI = LI.getVNInfoAt(
+ LIS.getInstructionIndex(*Def).getRegSlot());
+ assert(DefVNI);
+ for (auto &I : MRI.use_nodbg_operands(Reg)) {
+ const auto &Result = LI.Query(LIS.getInstructionIndex(*I.getParent()));
+ if (Result.valueIn() == DefVNI) {
+ if (!Result.isKill())
+ return false;
+ if (HasOne)
+ return false;
+ HasOne = true;
+ }
+ }
+ return HasOne;
+}
+
+// Test whether it's safe to move Def to just before Insert.
+// TODO: Compute memory dependencies in a way that doesn't require always
+// walking the block.
+// TODO: Compute memory dependencies in a way that uses AliasAnalysis to be
+// more precise.
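+//
+// For example, if Def may read memory and some instruction between Def and
+// Insert may write memory, the move is rejected, since sinking the read
+// past the write could change the value observed.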
+static bool IsSafeToMove(const MachineInstr *Def, const MachineInstr *Insert,
+ AliasAnalysis &AA, const MachineRegisterInfo &MRI) {
+ assert(Def->getParent() == Insert->getParent());
+
+ // Check for register dependencies.
+ SmallVector<unsigned, 4> MutableRegisters;
+ for (const MachineOperand &MO : Def->operands()) {
+ if (!MO.isReg() || MO.isUndef())
+ continue;
+ unsigned Reg = MO.getReg();
+
+ // If the register is dead here and at Insert, ignore it.
+ if (MO.isDead() && Insert->definesRegister(Reg) &&
+ !Insert->readsRegister(Reg))
+ continue;
+
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ // Ignore ARGUMENTS; it's just used to keep the ARGUMENT_* instructions
+ // from moving down, and we've already checked for that.
+ if (Reg == WebAssembly::ARGUMENTS)
+ continue;
+ // If the physical register is never modified, ignore it.
+ if (!MRI.isPhysRegModified(Reg))
+ continue;
+ // Otherwise, it's a physical register with unknown liveness.
+ return false;
+ }
+
+ // If one of the operands isn't in SSA form, it has different values at
+ // different times, and we need to make sure we don't move our use across
+ // a different def.
+ if (!MO.isDef() && !MRI.hasOneDef(Reg))
+ MutableRegisters.push_back(Reg);
+ }
+
+ bool Read = false, Write = false, Effects = false, StackPointer = false;
+ Query(*Def, AA, Read, Write, Effects, StackPointer);
+
+ // If the instruction does not access memory and has no side effects, it has
+ // no additional dependencies.
+ bool HasMutableRegisters = !MutableRegisters.empty();
+ if (!Read && !Write && !Effects && !StackPointer && !HasMutableRegisters)
+ return true;
+
+ // Scan through the intervening instructions between Def and Insert.
+ MachineBasicBlock::const_iterator D(Def), I(Insert);
+ for (--I; I != D; --I) {
+ bool InterveningRead = false;
+ bool InterveningWrite = false;
+ bool InterveningEffects = false;
+ bool InterveningStackPointer = false;
+ Query(*I, AA, InterveningRead, InterveningWrite, InterveningEffects,
+ InterveningStackPointer);
+ if (Effects && InterveningEffects)
+ return false;
+ if (Read && InterveningWrite)
+ return false;
+ if (Write && (InterveningRead || InterveningWrite))
+ return false;
+ if (StackPointer && InterveningStackPointer)
+ return false;
+
+ for (unsigned Reg : MutableRegisters)
+ for (const MachineOperand &MO : I->operands())
+ if (MO.isReg() && MO.isDef() && MO.getReg() == Reg)
+ return false;
+ }
+
+ return true;
+}
+
+/// Test whether OneUse, a use of Reg, dominates all of Reg's other uses.
+static bool OneUseDominatesOtherUses(unsigned Reg, const MachineOperand &OneUse,
+ const MachineBasicBlock &MBB,
+ const MachineRegisterInfo &MRI,
+ const MachineDominatorTree &MDT,
+ LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI) {
+ const LiveInterval &LI = LIS.getInterval(Reg);
+
+ const MachineInstr *OneUseInst = OneUse.getParent();
+ VNInfo *OneUseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*OneUseInst));
+
+ for (const MachineOperand &Use : MRI.use_nodbg_operands(Reg)) {
+ if (&Use == &OneUse)
+ continue;
+
+ const MachineInstr *UseInst = Use.getParent();
+ VNInfo *UseVNI = LI.getVNInfoBefore(LIS.getInstructionIndex(*UseInst));
+
+ if (UseVNI != OneUseVNI)
+ continue;
+
+ if (UseInst == OneUseInst) {
+ // Another use in the same instruction. We need to ensure that the one
+ // selected use happens "before" it.
+ if (&OneUse > &Use)
+ return false;
+ } else {
+ // Test that the use is dominated by the one selected use.
+ while (!MDT.dominates(OneUseInst, UseInst)) {
+ // Actually, dominating is over-conservative. Test that the use would
+ // happen after the one selected use in the stack evaluation order.
+ //
+ // This is needed as a consequence of using implicit get_locals for
+ // uses and implicit set_locals for defs.
+ if (UseInst->getDesc().getNumDefs() == 0)
+ return false;
+ const MachineOperand &MO = UseInst->getOperand(0);
+ if (!MO.isReg())
+ return false;
+ unsigned DefReg = MO.getReg();
+ if (!TargetRegisterInfo::isVirtualRegister(DefReg) ||
+ !MFI.isVRegStackified(DefReg))
+ return false;
+ assert(MRI.hasOneUse(DefReg));
+ const MachineOperand &NewUse = *MRI.use_begin(DefReg);
+ const MachineInstr *NewUseInst = NewUse.getParent();
+ if (NewUseInst == OneUseInst) {
+ if (&OneUse > &NewUse)
+ return false;
+ break;
+ }
+ UseInst = NewUseInst;
+ }
+ }
+ }
+ return true;
+}
+
+/// Get the appropriate tee opcode for the given register class.
+static unsigned GetTeeOpcode(const TargetRegisterClass *RC) {
+ if (RC == &WebAssembly::I32RegClass)
+ return WebAssembly::TEE_I32;
+ if (RC == &WebAssembly::I64RegClass)
+ return WebAssembly::TEE_I64;
+ if (RC == &WebAssembly::F32RegClass)
+ return WebAssembly::TEE_F32;
+ if (RC == &WebAssembly::F64RegClass)
+ return WebAssembly::TEE_F64;
+ if (RC == &WebAssembly::V128RegClass)
+ return WebAssembly::TEE_V128;
+ llvm_unreachable("Unexpected register class");
+}
+
+// Shrink LI to its uses, cleaning up LI.
+static void ShrinkToUses(LiveInterval &LI, LiveIntervals &LIS) {
+ if (LIS.shrinkToUses(&LI)) {
+ SmallVector<LiveInterval*, 4> SplitLIs;
+ LIS.splitSeparateComponents(LI, SplitLIs);
+ }
+}
+
+/// A single-use def in the same block with no intervening memory or register
+/// dependencies; move the def down and nest it with the current instruction.
+static MachineInstr *MoveForSingleUse(unsigned Reg, MachineOperand &Op,
+ MachineInstr *Def,
+ MachineBasicBlock &MBB,
+ MachineInstr *Insert, LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI) {
+ DEBUG(dbgs() << "Move for single use: "; Def->dump());
+
+ MBB.splice(Insert, &MBB, Def);
+ LIS.handleMove(*Def);
+
+ if (MRI.hasOneDef(Reg) && MRI.hasOneUse(Reg)) {
+ // No one else is using this register for anything so we can just stackify
+ // it in place.
+ MFI.stackifyVReg(Reg);
+ } else {
+ // The register may have unrelated uses or defs; create a new register for
+ // just our one def and use so that we can stackify it.
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ Def->getOperand(0).setReg(NewReg);
+ Op.setReg(NewReg);
+
+ // Tell LiveIntervals about the new register.
+ LIS.createAndComputeVirtRegInterval(NewReg);
+
+ // Tell LiveIntervals about the changes to the old register.
+ LiveInterval &LI = LIS.getInterval(Reg);
+ LI.removeSegment(LIS.getInstructionIndex(*Def).getRegSlot(),
+ LIS.getInstructionIndex(*Op.getParent()).getRegSlot(),
+ /*RemoveDeadValNo=*/true);
+
+ MFI.stackifyVReg(NewReg);
+
+ DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ }
+
+ ImposeStackOrdering(Def);
+ return Def;
+}
+
+/// A trivially cloneable instruction; clone it and nest the new copy with the
+/// current instruction.
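+///
+/// For example, a CONST_I32 def is as cheap as a move and has no side
+/// effects, so cloning it next to the use is typically preferable to
+/// introducing a tee for the original value.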
+static MachineInstr *RematerializeCheapDef(
+ unsigned Reg, MachineOperand &Op, MachineInstr &Def, MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator Insert, LiveIntervals &LIS,
+ WebAssemblyFunctionInfo &MFI, MachineRegisterInfo &MRI,
+ const WebAssemblyInstrInfo *TII, const WebAssemblyRegisterInfo *TRI) {
+ DEBUG(dbgs() << "Rematerializing cheap def: "; Def.dump());
+ DEBUG(dbgs() << " - for use in "; Op.getParent()->dump());
+
+ unsigned NewReg = MRI.createVirtualRegister(MRI.getRegClass(Reg));
+ TII->reMaterialize(MBB, Insert, NewReg, 0, Def, *TRI);
+ Op.setReg(NewReg);
+ MachineInstr *Clone = &*std::prev(Insert);
+ LIS.InsertMachineInstrInMaps(*Clone);
+ LIS.createAndComputeVirtRegInterval(NewReg);
+ MFI.stackifyVReg(NewReg);
+ ImposeStackOrdering(Clone);
+
+ DEBUG(dbgs() << " - Cloned to "; Clone->dump());
+
+ // Shrink the interval.
+ bool IsDead = MRI.use_empty(Reg);
+ if (!IsDead) {
+ LiveInterval &LI = LIS.getInterval(Reg);
+ ShrinkToUses(LI, LIS);
+ IsDead = !LI.liveAt(LIS.getInstructionIndex(Def).getDeadSlot());
+ }
+
+ // If that was the last use of the original, delete the original.
+ if (IsDead) {
+ DEBUG(dbgs() << " - Deleting original\n");
+ SlotIndex Idx = LIS.getInstructionIndex(Def).getRegSlot();
+ LIS.removePhysRegDefAt(WebAssembly::ARGUMENTS, Idx);
+ LIS.removeInterval(Reg);
+ LIS.RemoveMachineInstrFromMaps(Def);
+ Def.eraseFromParent();
+ }
+
+ return Clone;
+}
+
+/// A multiple-use def in the same block with no intervening memory or register
+/// dependencies; move the def down, nest it with the current instruction, and
+/// insert a tee to satisfy the rest of the uses. As an illustration, rewrite
+/// this:
+///
+/// Reg = INST ... // Def
+/// INST ..., Reg, ... // Insert
+/// INST ..., Reg, ...
+/// INST ..., Reg, ...
+///
+/// to this:
+///
+/// DefReg = INST ... // Def (to become the new Insert)
+/// TeeReg, Reg = TEE_... DefReg
+/// INST ..., TeeReg, ... // Insert
+/// INST ..., Reg, ...
+/// INST ..., Reg, ...
+///
+/// with DefReg and TeeReg stackified. This eliminates a get_local from the
+/// resulting code.
+static MachineInstr *MoveAndTeeForMultiUse(
+ unsigned Reg, MachineOperand &Op, MachineInstr *Def, MachineBasicBlock &MBB,
+ MachineInstr *Insert, LiveIntervals &LIS, WebAssemblyFunctionInfo &MFI,
+ MachineRegisterInfo &MRI, const WebAssemblyInstrInfo *TII) {
+  DEBUG(dbgs() << "Move and tee for multi-use: "; Def->dump());
+
+ // Move Def into place.
+ MBB.splice(Insert, &MBB, Def);
+ LIS.handleMove(*Def);
+
+ // Create the Tee and attach the registers.
+ const auto *RegClass = MRI.getRegClass(Reg);
+ unsigned TeeReg = MRI.createVirtualRegister(RegClass);
+ unsigned DefReg = MRI.createVirtualRegister(RegClass);
+ MachineOperand &DefMO = Def->getOperand(0);
+ MachineInstr *Tee = BuildMI(MBB, Insert, Insert->getDebugLoc(),
+ TII->get(GetTeeOpcode(RegClass)), TeeReg)
+ .addReg(Reg, RegState::Define)
+ .addReg(DefReg, getUndefRegState(DefMO.isDead()));
+ Op.setReg(TeeReg);
+ DefMO.setReg(DefReg);
+ SlotIndex TeeIdx = LIS.InsertMachineInstrInMaps(*Tee).getRegSlot();
+ SlotIndex DefIdx = LIS.getInstructionIndex(*Def).getRegSlot();
+
+ // Tell LiveIntervals we moved the original vreg def from Def to Tee.
+ LiveInterval &LI = LIS.getInterval(Reg);
+ LiveInterval::iterator I = LI.FindSegmentContaining(DefIdx);
+ VNInfo *ValNo = LI.getVNInfoAt(DefIdx);
+ I->start = TeeIdx;
+ ValNo->def = TeeIdx;
+ ShrinkToUses(LI, LIS);
+
+ // Finish stackifying the new regs.
+ LIS.createAndComputeVirtRegInterval(TeeReg);
+ LIS.createAndComputeVirtRegInterval(DefReg);
+ MFI.stackifyVReg(DefReg);
+ MFI.stackifyVReg(TeeReg);
+ ImposeStackOrdering(Def);
+ ImposeStackOrdering(Tee);
+
+ DEBUG(dbgs() << " - Replaced register: "; Def->dump());
+ DEBUG(dbgs() << " - Tee instruction: "; Tee->dump());
+ return Def;
+}
+
+namespace {
+/// A stack for walking the tree of instructions being built, visiting the
+/// MachineOperands in DFS order.
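+///
+/// For example, for "ADD_I32 %r, %a, %b" the walker yields %b and then %a,
+/// matching the LIFO order in which the operands will be popped from the
+/// value stack.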
+class TreeWalkerState {
+ typedef MachineInstr::mop_iterator mop_iterator;
+ typedef std::reverse_iterator<mop_iterator> mop_reverse_iterator;
+ typedef iterator_range<mop_reverse_iterator> RangeTy;
+ SmallVector<RangeTy, 4> Worklist;
+
+public:
+ explicit TreeWalkerState(MachineInstr *Insert) {
+ const iterator_range<mop_iterator> &Range = Insert->explicit_uses();
+ if (Range.begin() != Range.end())
+ Worklist.push_back(reverse(Range));
+ }
+
+ bool Done() const { return Worklist.empty(); }
+
+ MachineOperand &Pop() {
+ RangeTy &Range = Worklist.back();
+ MachineOperand &Op = *Range.begin();
+ Range = drop_begin(Range, 1);
+ if (Range.begin() == Range.end())
+ Worklist.pop_back();
+ assert((Worklist.empty() ||
+ Worklist.back().begin() != Worklist.back().end()) &&
+ "Empty ranges shouldn't remain in the worklist");
+ return Op;
+ }
+
+ /// Push Instr's operands onto the stack to be visited.
+ void PushOperands(MachineInstr *Instr) {
+ const iterator_range<mop_iterator> &Range(Instr->explicit_uses());
+ if (Range.begin() != Range.end())
+ Worklist.push_back(reverse(Range));
+ }
+
+ /// Some of Instr's operands are on the top of the stack; remove them and
+ /// re-insert them starting from the beginning (because we've commuted them).
+ void ResetTopOperands(MachineInstr *Instr) {
+ assert(HasRemainingOperands(Instr) &&
+ "Reseting operands should only be done when the instruction has "
+ "an operand still on the stack");
+ Worklist.back() = reverse(Instr->explicit_uses());
+ }
+
+ /// Test whether Instr has operands remaining to be visited at the top of
+ /// the stack.
+ bool HasRemainingOperands(const MachineInstr *Instr) const {
+ if (Worklist.empty())
+ return false;
+ const RangeTy &Range = Worklist.back();
+ return Range.begin() != Range.end() && Range.begin()->getParent() == Instr;
+ }
+
+ /// Test whether the given register is present on the stack, indicating an
+ /// operand in the tree that we haven't visited yet. Moving a definition of
+ /// Reg to a point in the tree after that would change its value.
+ ///
+ /// This is needed as a consequence of using implicit get_locals for
+ /// uses and implicit set_locals for defs.
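+  ///
+  /// For example, if a use of Reg is still pending on the worklist, nesting
+  /// another def of Reg below it would cause that pending use to pop the
+  /// wrong value.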
+ bool IsOnStack(unsigned Reg) const {
+ for (const RangeTy &Range : Worklist)
+ for (const MachineOperand &MO : Range)
+ if (MO.isReg() && MO.getReg() == Reg)
+ return true;
+ return false;
+ }
+};
+
+/// State to keep track of whether commuting is in flight or whether it's been
+/// tried for the current instruction and didn't work.
+class CommutingState {
+ /// There are effectively three states: the initial state where we haven't
+  /// started commuting anything and we don't know anything yet, the tentative
+  /// state where we've commuted the operands of the current instruction and
+  /// are revisiting it, and the declined state where we've reverted the
+  /// operands back to their original order and will no longer commute it
+  /// further.
+ bool TentativelyCommuting;
+ bool Declined;
+
+ /// During the tentative state, these hold the operand indices of the commuted
+ /// operands.
+ unsigned Operand0, Operand1;
+
+public:
+ CommutingState() : TentativelyCommuting(false), Declined(false) {}
+
+ /// Stackification for an operand was not successful due to ordering
+ /// constraints. If possible, and if we haven't already tried it and declined
+ /// it, commute Insert's operands and prepare to revisit it.
+ void MaybeCommute(MachineInstr *Insert, TreeWalkerState &TreeWalker,
+ const WebAssemblyInstrInfo *TII) {
+ if (TentativelyCommuting) {
+ assert(!Declined &&
+ "Don't decline commuting until you've finished trying it");
+ // Commuting didn't help. Revert it.
+ TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
+ TentativelyCommuting = false;
+ Declined = true;
+ } else if (!Declined && TreeWalker.HasRemainingOperands(Insert)) {
+ Operand0 = TargetInstrInfo::CommuteAnyOperandIndex;
+ Operand1 = TargetInstrInfo::CommuteAnyOperandIndex;
+ if (TII->findCommutedOpIndices(*Insert, Operand0, Operand1)) {
+ // Tentatively commute the operands and try again.
+ TII->commuteInstruction(*Insert, /*NewMI=*/false, Operand0, Operand1);
+ TreeWalker.ResetTopOperands(Insert);
+ TentativelyCommuting = true;
+ Declined = false;
+ }
+ }
+ }
+
+ /// Stackification for some operand was successful. Reset to the default
+ /// state.
+ void Reset() {
+ TentativelyCommuting = false;
+ Declined = false;
+ }
+};
+} // end anonymous namespace
+
+bool WebAssemblyRegStackify::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG(dbgs() << "********** Register Stackifying **********\n"
+ "********** Function: "
+ << MF.getName() << '\n');
+
+ bool Changed = false;
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ WebAssemblyFunctionInfo &MFI = *MF.getInfo<WebAssemblyFunctionInfo>();
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+ const auto *TRI = MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+ LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+
+ // Disable the TEE optimization if we aren't doing direct wasm object
+ // emission, because lowering TEE to TEE_LOCAL is done in the ExplicitLocals
+ // pass, which is also disabled.
+ bool UseTee = true;
+ if (MF.getSubtarget<WebAssemblySubtarget>()
+ .getTargetTriple().isOSBinFormatELF())
+ UseTee = false;
+
+ // Walk the instructions from the bottom up. Currently we don't look past
+ // block boundaries, and the blocks aren't ordered so the block visitation
+ // order isn't significant, but we may want to change this in the future.
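+  //
+  // For example (illustrative syntax), given
+  //   %a = CONST_I32 1
+  //   %r = ADD_I32 %a, %b
+  // visiting the ADD_I32 first lets us nest the CONST_I32 def into its use,
+  // so the constant value is pushed and popped directly on the wasm value
+  // stack instead of going through a local.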
+ for (MachineBasicBlock &MBB : MF) {
+ // Don't use a range-based for loop, because we modify the list as we're
+ // iterating over it and the end iterator may change.
+ for (auto MII = MBB.rbegin(); MII != MBB.rend(); ++MII) {
+ MachineInstr *Insert = &*MII;
+ // Don't nest anything inside an inline asm, because we don't have
+ // constraints for $push inputs.
+ if (Insert->getOpcode() == TargetOpcode::INLINEASM)
+ continue;
+
+ // Ignore debugging intrinsics.
+ if (Insert->getOpcode() == TargetOpcode::DBG_VALUE)
+ continue;
+
+ // Iterate through the inputs in reverse order, since we'll be pulling
+ // operands off the stack in LIFO order.
+ CommutingState Commuting;
+ TreeWalkerState TreeWalker(Insert);
+ while (!TreeWalker.Done()) {
+ MachineOperand &Op = TreeWalker.Pop();
+
+ // We're only interested in explicit virtual register operands.
+ if (!Op.isReg())
+ continue;
+
+ unsigned Reg = Op.getReg();
+ assert(Op.isUse() && "explicit_uses() should only iterate over uses");
+ assert(!Op.isImplicit() &&
+ "explicit_uses() should only iterate over explicit operands");
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ continue;
+
+ // Identify the definition for this register at this point.
+ MachineInstr *Def = GetVRegDef(Reg, Insert, MRI, LIS);
+ if (!Def)
+ continue;
+
+ // Don't nest an INLINE_ASM def into anything, because we don't have
+ // constraints for $pop outputs.
+ if (Def->getOpcode() == TargetOpcode::INLINEASM)
+ continue;
+
+ // Argument instructions represent live-in registers and not real
+ // instructions.
+ if (WebAssembly::isArgument(*Def))
+ continue;
+
+ // Decide which strategy to take. Prefer to move a single-use value
+ // over cloning it, and prefer cloning over introducing a tee.
+ // For moving, we require the def to be in the same block as the use;
+ // this makes things simpler (LiveIntervals' handleMove function only
+ // supports intra-block moves) and it's MachineSink's job to catch all
+ // the sinking opportunities anyway.
+ bool SameBlock = Def->getParent() == &MBB;
+ bool CanMove = SameBlock && IsSafeToMove(Def, Insert, AA, MRI) &&
+ !TreeWalker.IsOnStack(Reg);
+ if (CanMove && HasOneUse(Reg, Def, MRI, MDT, LIS)) {
+ Insert = MoveForSingleUse(Reg, Op, Def, MBB, Insert, LIS, MFI, MRI);
+ } else if (ShouldRematerialize(*Def, AA, TII)) {
+ Insert =
+ RematerializeCheapDef(Reg, Op, *Def, MBB, Insert->getIterator(),
+ LIS, MFI, MRI, TII, TRI);
+ } else if (UseTee && CanMove &&
+ OneUseDominatesOtherUses(Reg, Op, MBB, MRI, MDT, LIS, MFI)) {
+ Insert = MoveAndTeeForMultiUse(Reg, Op, Def, MBB, Insert, LIS, MFI,
+ MRI, TII);
+ } else {
+ // We failed to stackify the operand. If the problem was ordering
+ // constraints, Commuting may be able to help.
+ if (!CanMove && SameBlock)
+ Commuting.MaybeCommute(Insert, TreeWalker, TII);
+ // Proceed to the next operand.
+ continue;
+ }
+
+ // If the instruction we just stackified is an IMPLICIT_DEF, convert it
+ // to a constant 0 so that the def is explicit, and the push/pop
+ // correspondence is maintained.
+ if (Insert->getOpcode() == TargetOpcode::IMPLICIT_DEF)
+ ConvertImplicitDefToConstZero(Insert, MRI, TII, MF);
+
+ // We stackified an operand. Add the defining instruction's operands to
+ // the worklist stack now to continue to build an ever deeper tree.
+ Commuting.Reset();
+ TreeWalker.PushOperands(Insert);
+ }
+
+ // If we stackified any operands, skip over the tree to start looking for
+ // the next instruction we can build a tree on.
+ if (Insert != &*MII) {
+ ImposeStackOrdering(&*MII);
+ MII = MachineBasicBlock::iterator(Insert).getReverse();
+ Changed = true;
+ }
+ }
+ }
+
+ // If we used VALUE_STACK anywhere, add it to the live-in sets everywhere so
+ // that it never looks like a use-before-def.
+ if (Changed) {
+ MF.getRegInfo().addLiveIn(WebAssembly::VALUE_STACK);
+ for (MachineBasicBlock &MBB : MF)
+ MBB.addLiveIn(WebAssembly::VALUE_STACK);
+ }
+
+#ifndef NDEBUG
+ // Verify that pushes and pops are performed in LIFO order.
+ SmallVector<unsigned, 0> Stack;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ if (MI.isDebugValue())
+ continue;
+ for (MachineOperand &MO : reverse(MI.explicit_operands())) {
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+
+ if (MFI.isVRegStackified(Reg)) {
+ if (MO.isDef())
+ Stack.push_back(Reg);
+ else
+ assert(Stack.pop_back_val() == Reg &&
+ "Register stack pop should be paired with a push");
+ }
+ }
+ }
+ // TODO: Generalize this code to support keeping values on the stack across
+ // basic block boundaries.
+ assert(Stack.empty() &&
+ "Register stack pushes and pops should be balanced");
+ }
+#endif
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
new file mode 100644
index 000000000000..5e7ebd19fac7
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp
@@ -0,0 +1,148 @@
+//===-- WebAssemblyRegisterInfo.cpp - WebAssembly Register Information ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the WebAssembly implementation of the
+/// TargetRegisterInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyRegisterInfo.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyFrameLowering.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/Target/TargetOptions.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-reg-info"
+
+#define GET_REGINFO_TARGET_DESC
+#include "WebAssemblyGenRegisterInfo.inc"
+
+WebAssemblyRegisterInfo::WebAssemblyRegisterInfo(const Triple &TT)
+ : WebAssemblyGenRegisterInfo(0), TT(TT) {}
+
+const MCPhysReg *
+WebAssemblyRegisterInfo::getCalleeSavedRegs(const MachineFunction *) const {
+ static const MCPhysReg CalleeSavedRegs[] = {0};
+ return CalleeSavedRegs;
+}
+
+BitVector
+WebAssemblyRegisterInfo::getReservedRegs(const MachineFunction & /*MF*/) const {
+ BitVector Reserved(getNumRegs());
+ for (auto Reg : {WebAssembly::SP32, WebAssembly::SP64, WebAssembly::FP32,
+ WebAssembly::FP64})
+ Reserved.set(Reg);
+ return Reserved;
+}
+
+void WebAssemblyRegisterInfo::eliminateFrameIndex(
+ MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum,
+ RegScavenger * /*RS*/) const {
+ assert(SPAdj == 0);
+ MachineInstr &MI = *II;
+
+ MachineBasicBlock &MBB = *MI.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ int64_t FrameOffset = MFI.getStackSize() + MFI.getObjectOffset(FrameIndex);
+
+ assert(MFI.getObjectSize(FrameIndex) != 0 &&
+ "We assume that variable-sized objects have already been lowered, "
+ "and don't use FrameIndex operands.");
+ unsigned FrameRegister = getFrameRegister(MF);
+
+ // If this is the address operand of a load or store, make it relative to SP
+ // and fold the frame offset directly in.
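+  //
+  // For example (illustrative syntax), "i32.load %r, 16(%fi)" with a frame
+  // offset of 32 becomes "i32.load %r, 48($sp)".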
+ if ((MI.mayLoad() && FIOperandNum == WebAssembly::LoadAddressOperandNo) ||
+ (MI.mayStore() && FIOperandNum == WebAssembly::StoreAddressOperandNo)) {
+ assert(FrameOffset >= 0 && MI.getOperand(FIOperandNum - 1).getImm() >= 0);
+ int64_t Offset = MI.getOperand(FIOperandNum - 1).getImm() + FrameOffset;
+
+ if (static_cast<uint64_t>(Offset) <= std::numeric_limits<uint32_t>::max()) {
+ MI.getOperand(FIOperandNum - 1).setImm(Offset);
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(FrameRegister, /*IsDef=*/false);
+ return;
+ }
+ }
+
+ // If this is an address being added to a constant, fold the frame offset
+ // into the constant.
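+  //
+  // For example (illustrative syntax), "%c = CONST_I32 12" feeding
+  // "ADD_I32 %r, %fi, %c" becomes "%c = CONST_I32 12+FrameOffset" feeding
+  // "ADD_I32 %r, $sp, %c", provided the constant has a single use.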
+ if (MI.getOpcode() == WebAssembly::ADD_I32) {
+ MachineOperand &OtherMO = MI.getOperand(3 - FIOperandNum);
+ if (OtherMO.isReg()) {
+ unsigned OtherMOReg = OtherMO.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(OtherMOReg)) {
+ MachineInstr *Def = MF.getRegInfo().getUniqueVRegDef(OtherMOReg);
+ // TODO: For now we just opportunistically do this in the case where
+ // the CONST_I32 happens to have exactly one def and one use. We
+ // should generalize this to optimize in more cases.
+ if (Def && Def->getOpcode() == WebAssembly::CONST_I32 &&
+ MRI.hasOneNonDBGUse(Def->getOperand(0).getReg())) {
+ MachineOperand &ImmMO = Def->getOperand(1);
+ ImmMO.setImm(ImmMO.getImm() + uint32_t(FrameOffset));
+ MI.getOperand(FIOperandNum)
+ .ChangeToRegister(FrameRegister, /*IsDef=*/false);
+ return;
+ }
+ }
+ }
+ }
+
+ // Otherwise create an i32.add SP, offset and make it the operand.
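+  //
+  // That is, emit
+  //   %offset = CONST_I32 FrameOffset
+  //   %fireg  = ADD_I32 FrameRegister, %offset
+  // (with %offset and %fireg fresh virtual registers) and use %fireg in
+  // place of the frame index operand.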
+ const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
+
+ unsigned FIRegOperand = FrameRegister;
+ if (FrameOffset) {
+ // Create i32.add SP, offset and make it the operand.
+ const TargetRegisterClass *PtrRC =
+ MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
+ unsigned OffsetOp = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, *II, II->getDebugLoc(), TII->get(WebAssembly::CONST_I32),
+ OffsetOp)
+ .addImm(FrameOffset);
+ FIRegOperand = MRI.createVirtualRegister(PtrRC);
+ BuildMI(MBB, *II, II->getDebugLoc(), TII->get(WebAssembly::ADD_I32),
+ FIRegOperand)
+ .addReg(FrameRegister)
+ .addReg(OffsetOp);
+ }
+ MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false);
+}
+
+unsigned
+WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ static const unsigned Regs[2][2] = {
+ /* !isArch64Bit isArch64Bit */
+ /* !hasFP */ {WebAssembly::SP32, WebAssembly::SP64},
+ /* hasFP */ {WebAssembly::FP32, WebAssembly::FP64}};
+ const WebAssemblyFrameLowering *TFI = getFrameLowering(MF);
+ return Regs[TFI->hasFP(MF)][TT.isArch64Bit()];
+}
+
+const TargetRegisterClass *
+WebAssemblyRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ assert(Kind == 0 && "Only one kind of pointer on WebAssembly");
+ if (MF.getSubtarget<WebAssemblySubtarget>().hasAddr64())
+ return &WebAssembly::I64RegClass;
+ return &WebAssembly::I32RegClass;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h
new file mode 100644
index 000000000000..ad1d71eebf22
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.h
@@ -0,0 +1,52 @@
+// WebAssemblyRegisterInfo.h - WebAssembly Register Information Impl -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the WebAssembly implementation of the
+/// WebAssemblyRegisterInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYREGISTERINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYREGISTERINFO_H
+
+#define GET_REGINFO_HEADER
+#include "WebAssemblyGenRegisterInfo.inc"
+
+namespace llvm {
+
+class MachineFunction;
+class RegScavenger;
+class TargetRegisterClass;
+class Triple;
+
+class WebAssemblyRegisterInfo final : public WebAssemblyGenRegisterInfo {
+ const Triple &TT;
+
+public:
+ explicit WebAssemblyRegisterInfo(const Triple &TT);
+
+ // Code Generation virtual methods.
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+ void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
+ unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ // Debug information queries.
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
+
+ const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind = 0) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
new file mode 100644
index 000000000000..90888100be17
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegisterInfo.td
@@ -0,0 +1,62 @@
+//WebAssemblyRegisterInfo.td-Describe the WebAssembly Registers -*- tablegen -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file describes the WebAssembly register classes and some nominal
+/// physical registers.
+///
+//===----------------------------------------------------------------------===//
+
+class WebAssemblyReg<string n> : Register<n> {
+ let Namespace = "WebAssembly";
+}
+
+class WebAssemblyRegClass<list<ValueType> regTypes, int alignment, dag regList>
+ : RegisterClass<"WebAssembly", regTypes, alignment, regList>;
+
+//===----------------------------------------------------------------------===//
+// Registers
+//===----------------------------------------------------------------------===//
+
+// Special registers used as the frame and stack pointer.
+//
+// WebAssembly may someday support mixed 32-bit and 64-bit heaps in the same
+// application, which would require separate-width FP and SP registers.
+def FP32 : WebAssemblyReg<"%FP32">;
+def FP64 : WebAssemblyReg<"%FP64">;
+def SP32 : WebAssemblyReg<"%SP32">;
+def SP64 : WebAssemblyReg<"%SP64">;
+
+// The register allocation framework requires register classes have at least
+// one register, so we define a few for the floating point register classes
+// since we otherwise don't need a physical register in those classes.
+def F32_0 : WebAssemblyReg<"%f32.0">;
+def F64_0 : WebAssemblyReg<"%f64.0">;
+
+def V128_0 : WebAssemblyReg<"%v128">;
+
+// The value stack "register". This is an opaque entity which serves to order
+// uses and defs that must remain in LIFO order.
+def VALUE_STACK : WebAssemblyReg<"STACK">;
+
+// The incoming arguments "register". This is an opaque entity which serves to
+// order the ARGUMENT instructions that are emulating live-in registers and
+// must not be scheduled below other instructions.
+def ARGUMENTS : WebAssemblyReg<"ARGUMENTS">;
+
+//===----------------------------------------------------------------------===//
+// Register classes
+//===----------------------------------------------------------------------===//
+
+def I32 : WebAssemblyRegClass<[i32], 32, (add FP32, SP32)>;
+def I64 : WebAssemblyRegClass<[i64], 64, (add FP64, SP64)>;
+def F32 : WebAssemblyRegClass<[f32], 32, (add F32_0)>;
+def F64 : WebAssemblyRegClass<[f64], 64, (add F64_0)>;
+def V128 : WebAssemblyRegClass<[v4f32, v4i32, v16i8, v8i16], 128, (add V128_0)>;
+
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
new file mode 100644
index 000000000000..878ffd08d228
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyReplacePhysRegs.cpp
@@ -0,0 +1,99 @@
+//===-- WebAssemblyReplacePhysRegs.cpp - Replace phys regs with virt regs -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a pass that replaces physical registers with
+/// virtual registers.
+///
+/// LLVM expects certain physical registers, such as a stack pointer. However,
+/// WebAssembly doesn't actually have such physical registers. This pass is run
+/// once LLVM no longer needs these registers, and replaces them with virtual
+/// registers, so they can participate in register stackifying and coloring in
+/// the normal way.
+///
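+/// For example, an explicit use of SP32 is rewritten to use a fresh virtual
+/// register of SP32's register class; implicit operands and the fake
+/// VALUE_STACK and ARGUMENTS registers are left untouched.
+///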
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-replace-phys-regs"
+
+namespace {
+class WebAssemblyReplacePhysRegs final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyReplacePhysRegs() : MachineFunctionPass(ID) {}
+
+private:
+ StringRef getPassName() const override {
+ return "WebAssembly Replace Physical Registers";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblyReplacePhysRegs::ID = 0;
+FunctionPass *llvm::createWebAssemblyReplacePhysRegs() {
+ return new WebAssemblyReplacePhysRegs();
+}
+
+bool WebAssemblyReplacePhysRegs::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Replace Physical Registers **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const auto &TRI = *MF.getSubtarget<WebAssemblySubtarget>().getRegisterInfo();
+ bool Changed = false;
+
+ assert(!mustPreserveAnalysisID(LiveIntervalsID) &&
+ "LiveIntervals shouldn't be active yet!");
+ // We don't preserve SSA or liveness.
+ MRI.leaveSSA();
+ MRI.invalidateLiveness();
+
+ for (unsigned PReg = WebAssembly::NoRegister + 1;
+ PReg < WebAssembly::NUM_TARGET_REGS; ++PReg) {
+ // Skip fake registers that are never used explicitly.
+ if (PReg == WebAssembly::VALUE_STACK || PReg == WebAssembly::ARGUMENTS)
+ continue;
+
+ // Replace explicit uses of the physical register with a virtual register.
+ const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PReg);
+ unsigned VReg = WebAssembly::NoRegister;
+ for (auto I = MRI.reg_begin(PReg), E = MRI.reg_end(); I != E; ) {
+ MachineOperand &MO = *I++;
+ if (!MO.isImplicit()) {
+ if (VReg == WebAssembly::NoRegister)
+ VReg = MRI.createVirtualRegister(RC);
+ MO.setReg(VReg);
+ if (MO.getParent()->isDebugValue())
+ MO.setIsDebug();
+ Changed = true;
+ }
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
new file mode 100644
index 000000000000..f808c063d7e4
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.cpp
@@ -0,0 +1,1329 @@
+// CodeGen/RuntimeLibcallSignatures.cpp - R.T. Lib. Call Signatures -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains signature information for runtime libcalls.
+///
+/// CodeGen uses external symbols, which it refers to by name. The WebAssembly
+/// target needs type information for all functions. This file contains a big
+/// table providing type signatures for all runtime library functions that LLVM
+/// uses.
+///
+/// This is currently a fairly heavy-handed solution.
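+///
+/// For example, SHL_I64 maps to "__ashldi3" with signature i64_func_i64_i64,
+/// i.e. an i64 result computed from two i64 arguments.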
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyRuntimeLibcallSignatures.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+using namespace llvm;
+
+namespace {
+
+enum RuntimeLibcallSignature {
+ func,
+ f32_func_f32,
+ f32_func_f64,
+ f32_func_i32,
+ f32_func_i64,
+ f32_func_i16,
+ f64_func_f32,
+ f64_func_f64,
+ f64_func_i32,
+ f64_func_i64,
+ i32_func_f32,
+ i32_func_f64,
+ i32_func_i32,
+ i64_func_f32,
+ i64_func_f64,
+ i64_func_i64,
+ f32_func_f32_f32,
+ f32_func_f32_i32,
+ f32_func_i64_i64,
+ f64_func_f64_f64,
+ f64_func_f64_i32,
+ f64_func_i64_i64,
+ i16_func_f32,
+ i8_func_i8_i8,
+ func_f32_iPTR_iPTR,
+ func_f64_iPTR_iPTR,
+ i16_func_i16_i16,
+ i32_func_f32_f32,
+ i32_func_f64_f64,
+ i32_func_i32_i32,
+ i64_func_i64_i64,
+ i64_i64_func_f32,
+ i64_i64_func_f64,
+ i16_i16_func_i16_i16,
+ i32_i32_func_i32_i32,
+ i64_i64_func_i64_i64,
+ i64_i64_func_i64_i64_i64_i64,
+ i64_i64_i64_i64_func_i64_i64_i64_i64,
+ i64_i64_func_i64_i64_i32,
+ iPTR_func_iPTR_i32_iPTR,
+ iPTR_func_iPTR_iPTR_iPTR,
+ f32_func_f32_f32_f32,
+ f64_func_f64_f64_f64,
+ func_i64_i64_iPTR_iPTR,
+ func_iPTR_f32,
+ func_iPTR_f64,
+ func_iPTR_i32,
+ func_iPTR_i64,
+ func_iPTR_i64_i64,
+ func_iPTR_i64_i64_i64_i64,
+ func_iPTR_i64_i64_i64_i64_i64_i64,
+ i32_func_i64_i64,
+ i32_func_i64_i64_i64_i64,
+ unsupported
+};
+
+} // end anonymous namespace
+
+static const RuntimeLibcallSignature
+RuntimeLibcallSignatures[RTLIB::UNKNOWN_LIBCALL] = {
+// Integer
+/* SHL_I16 */ i16_func_i16_i16,
+/* SHL_I32 */ i32_func_i32_i32,
+/* SHL_I64 */ i64_func_i64_i64,
+/* SHL_I128 */ i64_i64_func_i64_i64_i32,
+/* SRL_I16 */ i16_func_i16_i16,
+/* SRL_I32 */ i32_func_i32_i32,
+/* SRL_I64 */ i64_func_i64_i64,
+/* SRL_I128 */ i64_i64_func_i64_i64_i32,
+/* SRA_I16 */ i16_func_i16_i16,
+/* SRA_I32 */ i32_func_i32_i32,
+/* SRA_I64 */ i64_func_i64_i64,
+/* SRA_I128 */ i64_i64_func_i64_i64_i32,
+/* MUL_I8 */ i8_func_i8_i8,
+/* MUL_I16 */ i16_func_i16_i16,
+/* MUL_I32 */ i32_func_i32_i32,
+/* MUL_I64 */ i64_func_i64_i64,
+/* MUL_I128 */ i64_i64_func_i64_i64_i64_i64,
+/* MULO_I32 */ i32_func_i32_i32,
+/* MULO_I64 */ i64_func_i64_i64,
+/* MULO_I128 */ i64_i64_func_i64_i64_i64_i64,
+/* SDIV_I8 */ i8_func_i8_i8,
+/* SDIV_I16 */ i16_func_i16_i16,
+/* SDIV_I32 */ i32_func_i32_i32,
+/* SDIV_I64 */ i64_func_i64_i64,
+/* SDIV_I128 */ i64_i64_func_i64_i64_i64_i64,
+/* UDIV_I8 */ i8_func_i8_i8,
+/* UDIV_I16 */ i16_func_i16_i16,
+/* UDIV_I32 */ i32_func_i32_i32,
+/* UDIV_I64 */ i64_func_i64_i64,
+/* UDIV_I128 */ i64_i64_func_i64_i64_i64_i64,
+/* SREM_I8 */ i8_func_i8_i8,
+/* SREM_I16 */ i16_func_i16_i16,
+/* SREM_I32 */ i32_func_i32_i32,
+/* SREM_I64 */ i64_func_i64_i64,
+/* SREM_I128 */ i64_i64_func_i64_i64_i64_i64,
+/* UREM_I8 */ i8_func_i8_i8,
+/* UREM_I16 */ i16_func_i16_i16,
+/* UREM_I32 */ i32_func_i32_i32,
+/* UREM_I64 */ i64_func_i64_i64,
+/* UREM_I128 */ i64_i64_func_i64_i64_i64_i64,
+/* SDIVREM_I8 */ i8_func_i8_i8,
+/* SDIVREM_I16 */ i16_i16_func_i16_i16,
+/* SDIVREM_I32 */ i32_i32_func_i32_i32,
+/* SDIVREM_I64 */ i64_func_i64_i64,
+/* SDIVREM_I128 */ i64_i64_i64_i64_func_i64_i64_i64_i64,
+/* UDIVREM_I8 */ i8_func_i8_i8,
+/* UDIVREM_I16 */ i16_i16_func_i16_i16,
+/* UDIVREM_I32 */ i32_i32_func_i32_i32,
+/* UDIVREM_I64 */ i64_i64_func_i64_i64,
+/* UDIVREM_I128 */ i64_i64_i64_i64_func_i64_i64_i64_i64,
+/* NEG_I32 */ i32_func_i32,
+/* NEG_I64 */ i64_func_i64,
+
+// FLOATING POINT
+/* ADD_F32 */ f32_func_f32_f32,
+/* ADD_F64 */ f64_func_f64_f64,
+/* ADD_F80 */ unsupported,
+/* ADD_F128 */ func_iPTR_i64_i64_i64_i64,
+/* ADD_PPCF128 */ unsupported,
+/* SUB_F32 */ f32_func_f32_f32,
+/* SUB_F64 */ f64_func_f64_f64,
+/* SUB_F80 */ unsupported,
+/* SUB_F128 */ func_iPTR_i64_i64_i64_i64,
+/* SUB_PPCF128 */ unsupported,
+/* MUL_F32 */ f32_func_f32_f32,
+/* MUL_F64 */ f64_func_f64_f64,
+/* MUL_F80 */ unsupported,
+/* MUL_F128 */ func_iPTR_i64_i64_i64_i64,
+/* MUL_PPCF128 */ unsupported,
+/* DIV_F32 */ f32_func_f32_f32,
+/* DIV_F64 */ f64_func_f64_f64,
+/* DIV_F80 */ unsupported,
+/* DIV_F128 */ func_iPTR_i64_i64_i64_i64,
+/* DIV_PPCF128 */ unsupported,
+/* REM_F32 */ f32_func_f32_f32,
+/* REM_F64 */ f64_func_f64_f64,
+/* REM_F80 */ unsupported,
+/* REM_F128 */ func_iPTR_i64_i64_i64_i64,
+/* REM_PPCF128 */ unsupported,
+/* FMA_F32 */ f32_func_f32_f32_f32,
+/* FMA_F64 */ f64_func_f64_f64_f64,
+/* FMA_F80 */ unsupported,
+/* FMA_F128 */ func_iPTR_i64_i64_i64_i64_i64_i64,
+/* FMA_PPCF128 */ unsupported,
+/* POWI_F32 */ f32_func_f32_i32,
+/* POWI_F64 */ f64_func_f64_i32,
+/* POWI_F80 */ unsupported,
+/* POWI_F128 */ func_iPTR_i64_i64_i64_i64,
+/* POWI_PPCF128 */ unsupported,
+/* SQRT_F32 */ f32_func_f32,
+/* SQRT_F64 */ f64_func_f64,
+/* SQRT_F80 */ unsupported,
+/* SQRT_F128 */ func_iPTR_i64_i64,
+/* SQRT_PPCF128 */ unsupported,
+/* LOG_F32 */ f32_func_f32,
+/* LOG_F64 */ f64_func_f64,
+/* LOG_F80 */ unsupported,
+/* LOG_F128 */ func_iPTR_i64_i64,
+/* LOG_PPCF128 */ unsupported,
+/* LOG2_F32 */ f32_func_f32,
+/* LOG2_F64 */ f64_func_f64,
+/* LOG2_F80 */ unsupported,
+/* LOG2_F128 */ func_iPTR_i64_i64,
+/* LOG2_PPCF128 */ unsupported,
+/* LOG10_F32 */ f32_func_f32,
+/* LOG10_F64 */ f64_func_f64,
+/* LOG10_F80 */ unsupported,
+/* LOG10_F128 */ func_iPTR_i64_i64,
+/* LOG10_PPCF128 */ unsupported,
+/* EXP_F32 */ f32_func_f32,
+/* EXP_F64 */ f64_func_f64,
+/* EXP_F80 */ unsupported,
+/* EXP_F128 */ func_iPTR_i64_i64,
+/* EXP_PPCF128 */ unsupported,
+/* EXP2_F32 */ f32_func_f32,
+/* EXP2_F64 */ f64_func_f64,
+/* EXP2_F80 */ unsupported,
+/* EXP2_F128 */ func_iPTR_i64_i64,
+/* EXP2_PPCF128 */ unsupported,
+/* SIN_F32 */ f32_func_f32,
+/* SIN_F64 */ f64_func_f64,
+/* SIN_F80 */ unsupported,
+/* SIN_F128 */ func_iPTR_i64_i64,
+/* SIN_PPCF128 */ unsupported,
+/* COS_F32 */ f32_func_f32,
+/* COS_F64 */ f64_func_f64,
+/* COS_F80 */ unsupported,
+/* COS_F128 */ func_iPTR_i64_i64,
+/* COS_PPCF128 */ unsupported,
+/* SINCOS_F32 */ func_f32_iPTR_iPTR,
+/* SINCOS_F64 */ func_f64_iPTR_iPTR,
+/* SINCOS_F80 */ unsupported,
+/* SINCOS_F128 */ func_i64_i64_iPTR_iPTR,
+/* SINCOS_PPCF128 */ unsupported,
+/* SINCOS_STRET_F32 */ unsupported,
+/* SINCOS_STRET_F64 */ unsupported,
+/* POW_F32 */ f32_func_f32_f32,
+/* POW_F64 */ f64_func_f64_f64,
+/* POW_F80 */ unsupported,
+/* POW_F128 */ func_iPTR_i64_i64_i64_i64,
+/* POW_PPCF128 */ unsupported,
+/* CEIL_F32 */ f32_func_f32,
+/* CEIL_F64 */ f64_func_f64,
+/* CEIL_F80 */ unsupported,
+/* CEIL_F128 */ func_iPTR_i64_i64,
+/* CEIL_PPCF128 */ unsupported,
+/* TRUNC_F32 */ f32_func_f32,
+/* TRUNC_F64 */ f64_func_f64,
+/* TRUNC_F80 */ unsupported,
+/* TRUNC_F128 */ func_iPTR_i64_i64,
+/* TRUNC_PPCF128 */ unsupported,
+/* RINT_F32 */ f32_func_f32,
+/* RINT_F64 */ f64_func_f64,
+/* RINT_F80 */ unsupported,
+/* RINT_F128 */ func_iPTR_i64_i64,
+/* RINT_PPCF128 */ unsupported,
+/* NEARBYINT_F32 */ f32_func_f32,
+/* NEARBYINT_F64 */ f64_func_f64,
+/* NEARBYINT_F80 */ unsupported,
+/* NEARBYINT_F128 */ func_iPTR_i64_i64,
+/* NEARBYINT_PPCF128 */ unsupported,
+/* ROUND_F32 */ f32_func_f32,
+/* ROUND_F64 */ f64_func_f64,
+/* ROUND_F80 */ unsupported,
+/* ROUND_F128 */ func_iPTR_i64_i64,
+/* ROUND_PPCF128 */ unsupported,
+/* FLOOR_F32 */ f32_func_f32,
+/* FLOOR_F64 */ f64_func_f64,
+/* FLOOR_F80 */ unsupported,
+/* FLOOR_F128 */ func_iPTR_i64_i64,
+/* FLOOR_PPCF128 */ unsupported,
+/* COPYSIGN_F32 */ f32_func_f32_f32,
+/* COPYSIGN_F64 */ f64_func_f64_f64,
+/* COPYSIGN_F80 */ unsupported,
+/* COPYSIGN_F128 */ func_iPTR_i64_i64_i64_i64,
+/* COPYSIGN_PPCF128 */ unsupported,
+/* FMIN_F32 */ f32_func_f32_f32,
+/* FMIN_F64 */ f64_func_f64_f64,
+/* FMIN_F80 */ unsupported,
+/* FMIN_F128 */ func_iPTR_i64_i64_i64_i64,
+/* FMIN_PPCF128 */ unsupported,
+/* FMAX_F32 */ f32_func_f32_f32,
+/* FMAX_F64 */ f64_func_f64_f64,
+/* FMAX_F80 */ unsupported,
+/* FMAX_F128 */ func_iPTR_i64_i64_i64_i64,
+/* FMAX_PPCF128 */ unsupported,
+
+// CONVERSION
+/* FPEXT_F32_PPCF128 */ unsupported,
+/* FPEXT_F64_PPCF128 */ unsupported,
+/* FPEXT_F64_F128 */ func_iPTR_f64,
+/* FPEXT_F32_F128 */ func_iPTR_f32,
+/* FPEXT_F32_F64 */ f64_func_f32,
+/* FPEXT_F16_F32 */ f32_func_i16,
+/* FPROUND_F32_F16 */ i16_func_f32,
+/* FPROUND_F64_F16 */ unsupported,
+/* FPROUND_F80_F16 */ unsupported,
+/* FPROUND_F128_F16 */ unsupported,
+/* FPROUND_PPCF128_F16 */ unsupported,
+/* FPROUND_F64_F32 */ f32_func_f64,
+/* FPROUND_F80_F32 */ unsupported,
+/* FPROUND_F128_F32 */ f32_func_i64_i64,
+/* FPROUND_PPCF128_F32 */ unsupported,
+/* FPROUND_F80_F64 */ unsupported,
+/* FPROUND_F128_F64 */ f64_func_i64_i64,
+/* FPROUND_PPCF128_F64 */ unsupported,
+/* FPTOSINT_F32_I32 */ i32_func_f32,
+/* FPTOSINT_F32_I64 */ i64_func_f32,
+/* FPTOSINT_F32_I128 */ i64_i64_func_f32,
+/* FPTOSINT_F64_I32 */ i32_func_f64,
+/* FPTOSINT_F64_I64 */ i64_func_f64,
+/* FPTOSINT_F64_I128 */ i64_i64_func_f64,
+/* FPTOSINT_F80_I32 */ unsupported,
+/* FPTOSINT_F80_I64 */ unsupported,
+/* FPTOSINT_F80_I128 */ unsupported,
+/* FPTOSINT_F128_I32 */ i32_func_i64_i64,
+/* FPTOSINT_F128_I64 */ i64_func_i64_i64,
+/* FPTOSINT_F128_I128 */ i64_i64_func_i64_i64,
+/* FPTOSINT_PPCF128_I32 */ unsupported,
+/* FPTOSINT_PPCF128_I64 */ unsupported,
+/* FPTOSINT_PPCF128_I128 */ unsupported,
+/* FPTOUINT_F32_I32 */ i32_func_f32,
+/* FPTOUINT_F32_I64 */ i64_func_f32,
+/* FPTOUINT_F32_I128 */ i64_i64_func_f32,
+/* FPTOUINT_F64_I32 */ i32_func_f64,
+/* FPTOUINT_F64_I64 */ i64_func_f64,
+/* FPTOUINT_F64_I128 */ i64_i64_func_f64,
+/* FPTOUINT_F80_I32 */ unsupported,
+/* FPTOUINT_F80_I64 */ unsupported,
+/* FPTOUINT_F80_I128 */ unsupported,
+/* FPTOUINT_F128_I32 */ i32_func_i64_i64,
+/* FPTOUINT_F128_I64 */ i64_func_i64_i64,
+/* FPTOUINT_F128_I128 */ i64_i64_func_i64_i64,
+/* FPTOUINT_PPCF128_I32 */ unsupported,
+/* FPTOUINT_PPCF128_I64 */ unsupported,
+/* FPTOUINT_PPCF128_I128 */ unsupported,
+/* SINTTOFP_I32_F32 */ f32_func_i32,
+/* SINTTOFP_I32_F64 */ f64_func_i32,
+/* SINTTOFP_I32_F80 */ unsupported,
+/* SINTTOFP_I32_F128 */ func_iPTR_i32,
+/* SINTTOFP_I32_PPCF128 */ unsupported,
+/* SINTTOFP_I64_F32 */ f32_func_i64,
+/* SINTTOFP_I64_F64 */ f64_func_i64,
+/* SINTTOFP_I64_F80 */ unsupported,
+/* SINTTOFP_I64_F128 */ func_iPTR_i64,
+/* SINTTOFP_I64_PPCF128 */ unsupported,
+/* SINTTOFP_I128_F32 */ f32_func_i64_i64,
+/* SINTTOFP_I128_F64 */ f64_func_i64_i64,
+/* SINTTOFP_I128_F80 */ unsupported,
+/* SINTTOFP_I128_F128 */ func_iPTR_i64_i64,
+/* SINTTOFP_I128_PPCF128 */ unsupported,
+/* UINTTOFP_I32_F32 */ f32_func_i32,
+/* UINTTOFP_I32_F64 */ f64_func_i64,
+/* UINTTOFP_I32_F80 */ unsupported,
+/* UINTTOFP_I32_F128 */ func_iPTR_i32,
+/* UINTTOFP_I32_PPCF128 */ unsupported,
+/* UINTTOFP_I64_F32 */ f32_func_i64,
+/* UINTTOFP_I64_F64 */ f64_func_i64,
+/* UINTTOFP_I64_F80 */ unsupported,
+/* UINTTOFP_I64_F128 */ func_iPTR_i64,
+/* UINTTOFP_I64_PPCF128 */ unsupported,
+/* UINTTOFP_I128_F32 */ f32_func_i64_i64,
+/* UINTTOFP_I128_F64 */ f64_func_i64_i64,
+/* UINTTOFP_I128_F80 */ unsupported,
+/* UINTTOFP_I128_F128 */ func_iPTR_i64_i64,
+/* UINTTOFP_I128_PPCF128 */ unsupported,
+
+// COMPARISON
+/* OEQ_F32 */ i32_func_f32_f32,
+/* OEQ_F64 */ i32_func_f64_f64,
+/* OEQ_F128 */ i32_func_i64_i64_i64_i64,
+/* OEQ_PPCF128 */ unsupported,
+/* UNE_F32 */ i32_func_f32_f32,
+/* UNE_F64 */ i32_func_f64_f64,
+/* UNE_F128 */ i32_func_i64_i64_i64_i64,
+/* UNE_PPCF128 */ unsupported,
+/* OGE_F32 */ i32_func_f32_f32,
+/* OGE_F64 */ i32_func_f64_f64,
+/* OGE_F128 */ i32_func_i64_i64_i64_i64,
+/* OGE_PPCF128 */ unsupported,
+/* OLT_F32 */ i32_func_f32_f32,
+/* OLT_F64 */ i32_func_f64_f64,
+/* OLT_F128 */ i32_func_i64_i64_i64_i64,
+/* OLT_PPCF128 */ unsupported,
+/* OLE_F32 */ i32_func_f32_f32,
+/* OLE_F64 */ i32_func_f64_f64,
+/* OLE_F128 */ i32_func_i64_i64_i64_i64,
+/* OLE_PPCF128 */ unsupported,
+/* OGT_F32 */ i32_func_f32_f32,
+/* OGT_F64 */ i32_func_f64_f64,
+/* OGT_F128 */ i32_func_i64_i64_i64_i64,
+/* OGT_PPCF128 */ unsupported,
+/* UO_F32 */ i32_func_f32_f32,
+/* UO_F64 */ i32_func_f64_f64,
+/* UO_F128 */ i32_func_i64_i64_i64_i64,
+/* UO_PPCF128 */ unsupported,
+/* O_F32 */ i32_func_f32_f32,
+/* O_F64 */ i32_func_f64_f64,
+/* O_F128 */ i32_func_i64_i64_i64_i64,
+/* O_PPCF128 */ unsupported,
+
+// MEMORY
+/* MEMCPY */ iPTR_func_iPTR_iPTR_iPTR,
+/* MEMMOVE */ iPTR_func_iPTR_iPTR_iPTR,
+/* MEMSET */ iPTR_func_iPTR_i32_iPTR,
+/* BZERO */ unsupported,
+
+// ELEMENT-WISE ATOMIC MEMORY
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported,
+
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_1 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_2 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_4 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_8 */ unsupported,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_16 */ unsupported,
+
+// EXCEPTION HANDLING
+/* UNWIND_RESUME */ unsupported,
+
+// Note: there are two sets of atomics libcalls; see
+// <http://llvm.org/docs/Atomics.html> for more info on the
+// difference between them.
+
+// Atomic '__sync_*' libcalls.
+/* SYNC_VAL_COMPARE_AND_SWAP_1 */ unsupported,
+/* SYNC_VAL_COMPARE_AND_SWAP_2 */ unsupported,
+/* SYNC_VAL_COMPARE_AND_SWAP_4 */ unsupported,
+/* SYNC_VAL_COMPARE_AND_SWAP_8 */ unsupported,
+/* SYNC_VAL_COMPARE_AND_SWAP_16 */ unsupported,
+/* SYNC_LOCK_TEST_AND_SET_1 */ unsupported,
+/* SYNC_LOCK_TEST_AND_SET_2 */ unsupported,
+/* SYNC_LOCK_TEST_AND_SET_4 */ unsupported,
+/* SYNC_LOCK_TEST_AND_SET_8 */ unsupported,
+/* SYNC_LOCK_TEST_AND_SET_16 */ unsupported,
+/* SYNC_FETCH_AND_ADD_1 */ unsupported,
+/* SYNC_FETCH_AND_ADD_2 */ unsupported,
+/* SYNC_FETCH_AND_ADD_4 */ unsupported,
+/* SYNC_FETCH_AND_ADD_8 */ unsupported,
+/* SYNC_FETCH_AND_ADD_16 */ unsupported,
+/* SYNC_FETCH_AND_SUB_1 */ unsupported,
+/* SYNC_FETCH_AND_SUB_2 */ unsupported,
+/* SYNC_FETCH_AND_SUB_4 */ unsupported,
+/* SYNC_FETCH_AND_SUB_8 */ unsupported,
+/* SYNC_FETCH_AND_SUB_16 */ unsupported,
+/* SYNC_FETCH_AND_AND_1 */ unsupported,
+/* SYNC_FETCH_AND_AND_2 */ unsupported,
+/* SYNC_FETCH_AND_AND_4 */ unsupported,
+/* SYNC_FETCH_AND_AND_8 */ unsupported,
+/* SYNC_FETCH_AND_AND_16 */ unsupported,
+/* SYNC_FETCH_AND_OR_1 */ unsupported,
+/* SYNC_FETCH_AND_OR_2 */ unsupported,
+/* SYNC_FETCH_AND_OR_4 */ unsupported,
+/* SYNC_FETCH_AND_OR_8 */ unsupported,
+/* SYNC_FETCH_AND_OR_16 */ unsupported,
+/* SYNC_FETCH_AND_XOR_1 */ unsupported,
+/* SYNC_FETCH_AND_XOR_2 */ unsupported,
+/* SYNC_FETCH_AND_XOR_4 */ unsupported,
+/* SYNC_FETCH_AND_XOR_8 */ unsupported,
+/* SYNC_FETCH_AND_XOR_16 */ unsupported,
+/* SYNC_FETCH_AND_NAND_1 */ unsupported,
+/* SYNC_FETCH_AND_NAND_2 */ unsupported,
+/* SYNC_FETCH_AND_NAND_4 */ unsupported,
+/* SYNC_FETCH_AND_NAND_8 */ unsupported,
+/* SYNC_FETCH_AND_NAND_16 */ unsupported,
+/* SYNC_FETCH_AND_MAX_1 */ unsupported,
+/* SYNC_FETCH_AND_MAX_2 */ unsupported,
+/* SYNC_FETCH_AND_MAX_4 */ unsupported,
+/* SYNC_FETCH_AND_MAX_8 */ unsupported,
+/* SYNC_FETCH_AND_MAX_16 */ unsupported,
+/* SYNC_FETCH_AND_UMAX_1 */ unsupported,
+/* SYNC_FETCH_AND_UMAX_2 */ unsupported,
+/* SYNC_FETCH_AND_UMAX_4 */ unsupported,
+/* SYNC_FETCH_AND_UMAX_8 */ unsupported,
+/* SYNC_FETCH_AND_UMAX_16 */ unsupported,
+/* SYNC_FETCH_AND_MIN_1 */ unsupported,
+/* SYNC_FETCH_AND_MIN_2 */ unsupported,
+/* SYNC_FETCH_AND_MIN_4 */ unsupported,
+/* SYNC_FETCH_AND_MIN_8 */ unsupported,
+/* SYNC_FETCH_AND_MIN_16 */ unsupported,
+/* SYNC_FETCH_AND_UMIN_1 */ unsupported,
+/* SYNC_FETCH_AND_UMIN_2 */ unsupported,
+/* SYNC_FETCH_AND_UMIN_4 */ unsupported,
+/* SYNC_FETCH_AND_UMIN_8 */ unsupported,
+/* SYNC_FETCH_AND_UMIN_16 */ unsupported,
+
+// Atomic '__atomic_*' libcalls.
+/* ATOMIC_LOAD */ unsupported,
+/* ATOMIC_LOAD_1 */ unsupported,
+/* ATOMIC_LOAD_2 */ unsupported,
+/* ATOMIC_LOAD_4 */ unsupported,
+/* ATOMIC_LOAD_8 */ unsupported,
+/* ATOMIC_LOAD_16 */ unsupported,
+
+/* ATOMIC_STORE */ unsupported,
+/* ATOMIC_STORE_1 */ unsupported,
+/* ATOMIC_STORE_2 */ unsupported,
+/* ATOMIC_STORE_4 */ unsupported,
+/* ATOMIC_STORE_8 */ unsupported,
+/* ATOMIC_STORE_16 */ unsupported,
+
+/* ATOMIC_EXCHANGE */ unsupported,
+/* ATOMIC_EXCHANGE_1 */ unsupported,
+/* ATOMIC_EXCHANGE_2 */ unsupported,
+/* ATOMIC_EXCHANGE_4 */ unsupported,
+/* ATOMIC_EXCHANGE_8 */ unsupported,
+/* ATOMIC_EXCHANGE_16 */ unsupported,
+
+/* ATOMIC_COMPARE_EXCHANGE */ unsupported,
+/* ATOMIC_COMPARE_EXCHANGE_1 */ unsupported,
+/* ATOMIC_COMPARE_EXCHANGE_2 */ unsupported,
+/* ATOMIC_COMPARE_EXCHANGE_4 */ unsupported,
+/* ATOMIC_COMPARE_EXCHANGE_8 */ unsupported,
+/* ATOMIC_COMPARE_EXCHANGE_16 */ unsupported,
+
+/* ATOMIC_FETCH_ADD_1 */ unsupported,
+/* ATOMIC_FETCH_ADD_2 */ unsupported,
+/* ATOMIC_FETCH_ADD_4 */ unsupported,
+/* ATOMIC_FETCH_ADD_8 */ unsupported,
+/* ATOMIC_FETCH_ADD_16 */ unsupported,
+
+/* ATOMIC_FETCH_SUB_1 */ unsupported,
+/* ATOMIC_FETCH_SUB_2 */ unsupported,
+/* ATOMIC_FETCH_SUB_4 */ unsupported,
+/* ATOMIC_FETCH_SUB_8 */ unsupported,
+/* ATOMIC_FETCH_SUB_16 */ unsupported,
+
+/* ATOMIC_FETCH_AND_1 */ unsupported,
+/* ATOMIC_FETCH_AND_2 */ unsupported,
+/* ATOMIC_FETCH_AND_4 */ unsupported,
+/* ATOMIC_FETCH_AND_8 */ unsupported,
+/* ATOMIC_FETCH_AND_16 */ unsupported,
+
+/* ATOMIC_FETCH_OR_1 */ unsupported,
+/* ATOMIC_FETCH_OR_2 */ unsupported,
+/* ATOMIC_FETCH_OR_4 */ unsupported,
+/* ATOMIC_FETCH_OR_8 */ unsupported,
+/* ATOMIC_FETCH_OR_16 */ unsupported,
+
+/* ATOMIC_FETCH_XOR_1 */ unsupported,
+/* ATOMIC_FETCH_XOR_2 */ unsupported,
+/* ATOMIC_FETCH_XOR_4 */ unsupported,
+/* ATOMIC_FETCH_XOR_8 */ unsupported,
+/* ATOMIC_FETCH_XOR_16 */ unsupported,
+
+/* ATOMIC_FETCH_NAND_1 */ unsupported,
+/* ATOMIC_FETCH_NAND_2 */ unsupported,
+/* ATOMIC_FETCH_NAND_4 */ unsupported,
+/* ATOMIC_FETCH_NAND_8 */ unsupported,
+/* ATOMIC_FETCH_NAND_16 */ unsupported,
+
+// Stack Protector Fail.
+/* STACKPROTECTOR_CHECK_FAIL */ func,
+
+// Deoptimization.
+/* DEOPTIMIZE */ unsupported,
+
+};
+
+static const char *
+RuntimeLibcallNames[RTLIB::UNKNOWN_LIBCALL] = {
+/* SHL_I16 */ "__ashlhi3",
+/* SHL_I32 */ "__ashlsi3",
+/* SHL_I64 */ "__ashldi3",
+/* SHL_I128 */ "__ashlti3",
+/* SRL_I16 */ "__lshrhi3",
+/* SRL_I32 */ "__lshrsi3",
+/* SRL_I64 */ "__lshrdi3",
+/* SRL_I128 */ "__lshrti3",
+/* SRA_I16 */ "__ashrhi3",
+/* SRA_I32 */ "__ashrsi3",
+/* SRA_I64 */ "__ashrdi3",
+/* SRA_I128 */ "__ashrti3",
+/* MUL_I8 */ "__mulqi3",
+/* MUL_I16 */ "__mulhi3",
+/* MUL_I32 */ "__mulsi3",
+/* MUL_I64 */ "__muldi3",
+/* MUL_I128 */ "__multi3",
+/* MULO_I32 */ "__mulosi4",
+/* MULO_I64 */ "__mulodi4",
+/* MULO_I128 */ "__muloti4",
+/* SDIV_I8 */ "__divqi3",
+/* SDIV_I16 */ "__divhi3",
+/* SDIV_I32 */ "__divsi3",
+/* SDIV_I64 */ "__divdi3",
+/* SDIV_I128 */ "__divti3",
+/* UDIV_I8 */ "__udivqi3",
+/* UDIV_I16 */ "__udivhi3",
+/* UDIV_I32 */ "__udivsi3",
+/* UDIV_I64 */ "__udivdi3",
+/* UDIV_I128 */ "__udivti3",
+/* SREM_I8 */ "__modqi3",
+/* SREM_I16 */ "__modhi3",
+/* SREM_I32 */ "__modsi3",
+/* SREM_I64 */ "__moddi3",
+/* SREM_I128 */ "__modti3",
+/* UREM_I8 */ "__umodqi3",
+/* UREM_I16 */ "__umodhi3",
+/* UREM_I32 */ "__umodsi3",
+/* UREM_I64 */ "__umoddi3",
+/* UREM_I128 */ "__umodti3",
+/* SDIVREM_I8 */ nullptr,
+/* SDIVREM_I16 */ nullptr,
+/* SDIVREM_I32 */ nullptr,
+/* SDIVREM_I64 */ nullptr,
+/* SDIVREM_I128 */ nullptr,
+/* UDIVREM_I8 */ nullptr,
+/* UDIVREM_I16 */ nullptr,
+/* UDIVREM_I32 */ nullptr,
+/* UDIVREM_I64 */ nullptr,
+/* UDIVREM_I128 */ nullptr,
+/* NEG_I32 */ "__negsi2",
+/* NEG_I64 */ "__negdi2",
+/* ADD_F32 */ "__addsf3",
+/* ADD_F64 */ "__adddf3",
+/* ADD_F80 */ nullptr,
+/* ADD_F128 */ "__addtf3",
+/* ADD_PPCF128 */ nullptr,
+/* SUB_F32 */ "__subsf3",
+/* SUB_F64 */ "__subdf3",
+/* SUB_F80 */ nullptr,
+/* SUB_F128 */ "__subtf3",
+/* SUB_PPCF128 */ nullptr,
+/* MUL_F32 */ "__mulsf3",
+/* MUL_F64 */ "__muldf3",
+/* MUL_F80 */ nullptr,
+/* MUL_F128 */ "__multf3",
+/* MUL_PPCF128 */ nullptr,
+/* DIV_F32 */ "__divsf3",
+/* DIV_F64 */ "__divdf3",
+/* DIV_F80 */ nullptr,
+/* DIV_F128 */ "__divtf3",
+/* DIV_PPCF128 */ nullptr,
+/* REM_F32 */ "fmodf",
+/* REM_F64 */ "fmod",
+/* REM_F80 */ nullptr,
+/* REM_F128 */ "fmodl",
+/* REM_PPCF128 */ nullptr,
+/* FMA_F32 */ "fmaf",
+/* FMA_F64 */ "fma",
+/* FMA_F80 */ nullptr,
+/* FMA_F128 */ "fmal",
+/* FMA_PPCF128 */ nullptr,
+/* POWI_F32 */ "__powisf2",
+/* POWI_F64 */ "__powidf2",
+/* POWI_F80 */ nullptr,
+/* POWI_F128 */ "__powitf2",
+/* POWI_PPCF128 */ nullptr,
+/* SQRT_F32 */ "sqrtf",
+/* SQRT_F64 */ "sqrt",
+/* SQRT_F80 */ nullptr,
+/* SQRT_F128 */ "sqrtl",
+/* SQRT_PPCF128 */ nullptr,
+/* LOG_F32 */ "logf",
+/* LOG_F64 */ "log",
+/* LOG_F80 */ nullptr,
+/* LOG_F128 */ "logl",
+/* LOG_PPCF128 */ nullptr,
+/* LOG2_F32 */ "log2f",
+/* LOG2_F64 */ "log2",
+/* LOG2_F80 */ nullptr,
+/* LOG2_F128 */ "log2l",
+/* LOG2_PPCF128 */ nullptr,
+/* LOG10_F32 */ "log10f",
+/* LOG10_F64 */ "log10",
+/* LOG10_F80 */ nullptr,
+/* LOG10_F128 */ "log10l",
+/* LOG10_PPCF128 */ nullptr,
+/* EXP_F32 */ "expf",
+/* EXP_F64 */ "exp",
+/* EXP_F80 */ nullptr,
+/* EXP_F128 */ "expl",
+/* EXP_PPCF128 */ nullptr,
+/* EXP2_F32 */ "exp2f",
+/* EXP2_F64 */ "exp2",
+/* EXP2_F80 */ nullptr,
+/* EXP2_F128 */ "exp2l",
+/* EXP2_PPCF128 */ nullptr,
+/* SIN_F32 */ "sinf",
+/* SIN_F64 */ "sin",
+/* SIN_F80 */ nullptr,
+/* SIN_F128 */ "sinl",
+/* SIN_PPCF128 */ nullptr,
+/* COS_F32 */ "cosf",
+/* COS_F64 */ "cos",
+/* COS_F80 */ nullptr,
+/* COS_F128 */ "cosl",
+/* COS_PPCF128 */ nullptr,
+/* SINCOS_F32 */ "sincosf",
+/* SINCOS_F64 */ "sincos",
+/* SINCOS_F80 */ nullptr,
+/* SINCOS_F128 */ "sincosl",
+/* SINCOS_PPCF128 */ nullptr,
+/* SINCOS_STRET_F32 */ nullptr,
+/* SINCOS_STRET_F64 */ nullptr,
+/* POW_F32 */ "powf",
+/* POW_F64 */ "pow",
+/* POW_F80 */ nullptr,
+/* POW_F128 */ "powl",
+/* POW_PPCF128 */ nullptr,
+/* CEIL_F32 */ "ceilf",
+/* CEIL_F64 */ "ceil",
+/* CEIL_F80 */ nullptr,
+/* CEIL_F128 */ "ceill",
+/* CEIL_PPCF128 */ nullptr,
+/* TRUNC_F32 */ "truncf",
+/* TRUNC_F64 */ "trunc",
+/* TRUNC_F80 */ nullptr,
+/* TRUNC_F128 */ "truncl",
+/* TRUNC_PPCF128 */ nullptr,
+/* RINT_F32 */ "rintf",
+/* RINT_F64 */ "rint",
+/* RINT_F80 */ nullptr,
+/* RINT_F128 */ "rintl",
+/* RINT_PPCF128 */ nullptr,
+/* NEARBYINT_F32 */ "nearbyintf",
+/* NEARBYINT_F64 */ "nearbyint",
+/* NEARBYINT_F80 */ nullptr,
+/* NEARBYINT_F128 */ "nearbyintl",
+/* NEARBYINT_PPCF128 */ nullptr,
+/* ROUND_F32 */ "roundf",
+/* ROUND_F64 */ "round",
+/* ROUND_F80 */ nullptr,
+/* ROUND_F128 */ "roundl",
+/* ROUND_PPCF128 */ nullptr,
+/* FLOOR_F32 */ "floorf",
+/* FLOOR_F64 */ "floor",
+/* FLOOR_F80 */ nullptr,
+/* FLOOR_F128 */ "floorl",
+/* FLOOR_PPCF128 */ nullptr,
+/* COPYSIGN_F32 */ "copysignf",
+/* COPYSIGN_F64 */ "copysign",
+/* COPYSIGN_F80 */ nullptr,
+/* COPYSIGN_F128 */ "copysignl",
+/* COPYSIGN_PPCF128 */ nullptr,
+/* FMIN_F32 */ "fminf",
+/* FMIN_F64 */ "fmin",
+/* FMIN_F80 */ nullptr,
+/* FMIN_F128 */ "fminl",
+/* FMIN_PPCF128 */ nullptr,
+/* FMAX_F32 */ "fmaxf",
+/* FMAX_F64 */ "fmax",
+/* FMAX_F80 */ nullptr,
+/* FMAX_F128 */ "fmaxl",
+/* FMAX_PPCF128 */ nullptr,
+/* FPEXT_F32_PPCF128 */ nullptr,
+/* FPEXT_F64_PPCF128 */ nullptr,
+/* FPEXT_F64_F128 */ "__extenddftf2",
+/* FPEXT_F32_F128 */ "__extendsftf2",
+/* FPEXT_F32_F64 */ "__extendsfdf2",
+/* FPEXT_F16_F32 */ "__gnu_h2f_ieee",
+/* FPROUND_F32_F16 */ "__gnu_f2h_ieee",
+/* FPROUND_F64_F16 */ nullptr,
+/* FPROUND_F80_F16 */ nullptr,
+/* FPROUND_F128_F16 */ nullptr,
+/* FPROUND_PPCF128_F16 */ nullptr,
+/* FPROUND_F64_F32 */ "__truncdfsf2",
+/* FPROUND_F80_F32 */ "__truncxfsf2",
+/* FPROUND_F128_F32 */ "__trunctfsf2",
+/* FPROUND_PPCF128_F32 */ nullptr,
+/* FPROUND_F80_F64 */ "__truncxfdf2",
+/* FPROUND_F128_F64 */ "__trunctfdf2",
+/* FPROUND_PPCF128_F64 */ nullptr,
+/* FPTOSINT_F32_I32 */ "__fixsfsi",
+/* FPTOSINT_F32_I64 */ "__fixsfdi",
+/* FPTOSINT_F32_I128 */ "__fixsfti",
+/* FPTOSINT_F64_I32 */ "__fixdfsi",
+/* FPTOSINT_F64_I64 */ "__fixdfdi",
+/* FPTOSINT_F64_I128 */ "__fixdfti",
+/* FPTOSINT_F80_I32 */ "__fixxfsi",
+/* FPTOSINT_F80_I64 */ "__fixxfdi",
+/* FPTOSINT_F80_I128 */ "__fixxfti",
+/* FPTOSINT_F128_I32 */ "__fixtfsi",
+/* FPTOSINT_F128_I64 */ "__fixtfdi",
+/* FPTOSINT_F128_I128 */ "__fixtfti",
+/* FPTOSINT_PPCF128_I32 */ nullptr,
+/* FPTOSINT_PPCF128_I64 */ nullptr,
+/* FPTOSINT_PPCF128_I128 */ nullptr,
+/* FPTOUINT_F32_I32 */ "__fixunssfsi",
+/* FPTOUINT_F32_I64 */ "__fixunssfdi",
+/* FPTOUINT_F32_I128 */ "__fixunssfti",
+/* FPTOUINT_F64_I32 */ "__fixunsdfsi",
+/* FPTOUINT_F64_I64 */ "__fixunsdfdi",
+/* FPTOUINT_F64_I128 */ "__fixunsdfti",
+/* FPTOUINT_F80_I32 */ "__fixunsxfsi",
+/* FPTOUINT_F80_I64 */ "__fixunsxfdi",
+/* FPTOUINT_F80_I128 */ "__fixunsxfti",
+/* FPTOUINT_F128_I32 */ "__fixunstfsi",
+/* FPTOUINT_F128_I64 */ "__fixunstfdi",
+/* FPTOUINT_F128_I128 */ "__fixunstfti",
+/* FPTOUINT_PPCF128_I32 */ nullptr,
+/* FPTOUINT_PPCF128_I64 */ nullptr,
+/* FPTOUINT_PPCF128_I128 */ nullptr,
+/* SINTTOFP_I32_F32 */ "__floatsisf",
+/* SINTTOFP_I32_F64 */ "__floatsidf",
+/* SINTTOFP_I32_F80 */ nullptr,
+/* SINTTOFP_I32_F128 */ "__floatsitf",
+/* SINTTOFP_I32_PPCF128 */ nullptr,
+/* SINTTOFP_I64_F32 */ "__floatdisf",
+/* SINTTOFP_I64_F64 */ "__floatdidf",
+/* SINTTOFP_I64_F80 */ nullptr,
+/* SINTTOFP_I64_F128 */ "__floatditf",
+/* SINTTOFP_I64_PPCF128 */ nullptr,
+/* SINTTOFP_I128_F32 */ "__floattisf",
+/* SINTTOFP_I128_F64 */ "__floattidf",
+/* SINTTOFP_I128_F80 */ nullptr,
+/* SINTTOFP_I128_F128 */ "__floattitf",
+/* SINTTOFP_I128_PPCF128 */ nullptr,
+/* UINTTOFP_I32_F32 */ "__floatunsisf",
+/* UINTTOFP_I32_F64 */ "__floatunsidf",
+/* UINTTOFP_I32_F80 */ nullptr,
+/* UINTTOFP_I32_F128 */ "__floatunsitf",
+/* UINTTOFP_I32_PPCF128 */ nullptr,
+/* UINTTOFP_I64_F32 */ "__floatundisf",
+/* UINTTOFP_I64_F64 */ "__floatundidf",
+/* UINTTOFP_I64_F80 */ nullptr,
+/* UINTTOFP_I64_F128 */ "__floatunditf",
+/* UINTTOFP_I64_PPCF128 */ nullptr,
+/* UINTTOFP_I128_F32 */ "__floatuntisf",
+/* UINTTOFP_I128_F64 */ "__floatuntidf",
+/* UINTTOFP_I128_F80 */ nullptr,
+/* UINTTOFP_I128_F128 */ "__floatuntitf",
+/* UINTTOFP_I128_PPCF128 */ nullptr,
+/* OEQ_F32 */ "__eqsf2",
+/* OEQ_F64 */ "__eqdf2",
+/* OEQ_F128 */ "__eqtf2",
+/* OEQ_PPCF128 */ nullptr,
+/* UNE_F32 */ "__nesf2",
+/* UNE_F64 */ "__nedf2",
+/* UNE_F128 */ "__netf2",
+/* UNE_PPCF128 */ nullptr,
+/* OGE_F32 */ "__gesf2",
+/* OGE_F64 */ "__gedf2",
+/* OGE_F128 */ "__getf2",
+/* OGE_PPCF128 */ nullptr,
+/* OLT_F32 */ "__ltsf2",
+/* OLT_F64 */ "__ltdf2",
+/* OLT_F128 */ "__lttf2",
+/* OLT_PPCF128 */ nullptr,
+/* OLE_F32 */ "__lesf2",
+/* OLE_F64 */ "__ledf2",
+/* OLE_F128 */ "__letf2",
+/* OLE_PPCF128 */ nullptr,
+/* OGT_F32 */ "__gtsf2",
+/* OGT_F64 */ "__gtdf2",
+/* OGT_F128 */ "__gttf2",
+/* OGT_PPCF128 */ nullptr,
+/* UO_F32 */ "__unordsf2",
+/* UO_F64 */ "__unorddf2",
+/* UO_F128 */ "__unordtf2",
+/* UO_PPCF128 */ nullptr,
+/* O_F32 */ "__unordsf2",
+/* O_F64 */ "__unorddf2",
+/* O_F128 */ "__unordtf2",
+/* O_PPCF128 */ nullptr,
+/* MEMCPY */ "memcpy",
+/* MEMMOVE */ "memmove",
+/* MEMSET */ "memset",
+/* BZERO */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr,
+/* MEMCPY_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr,
+/* MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_1 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_2 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_4 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_8 */ nullptr,
+/* MEMSET_ELEMENT_UNORDERED_ATOMIC_16 */ nullptr,
+/* UNWIND_RESUME */ "_Unwind_Resume",
+/* SYNC_VAL_COMPARE_AND_SWAP_1 */ "__sync_val_compare_and_swap_1",
+/* SYNC_VAL_COMPARE_AND_SWAP_2 */ "__sync_val_compare_and_swap_2",
+/* SYNC_VAL_COMPARE_AND_SWAP_4 */ "__sync_val_compare_and_swap_4",
+/* SYNC_VAL_COMPARE_AND_SWAP_8 */ "__sync_val_compare_and_swap_8",
+/* SYNC_VAL_COMPARE_AND_SWAP_16 */ "__sync_val_compare_and_swap_16",
+/* SYNC_LOCK_TEST_AND_SET_1 */ "__sync_lock_test_and_set_1",
+/* SYNC_LOCK_TEST_AND_SET_2 */ "__sync_lock_test_and_set_2",
+/* SYNC_LOCK_TEST_AND_SET_4 */ "__sync_lock_test_and_set_4",
+/* SYNC_LOCK_TEST_AND_SET_8 */ "__sync_lock_test_and_set_8",
+/* SYNC_LOCK_TEST_AND_SET_16 */ "__sync_lock_test_and_set_16",
+/* SYNC_FETCH_AND_ADD_1 */ "__sync_fetch_and_add_1",
+/* SYNC_FETCH_AND_ADD_2 */ "__sync_fetch_and_add_2",
+/* SYNC_FETCH_AND_ADD_4 */ "__sync_fetch_and_add_4",
+/* SYNC_FETCH_AND_ADD_8 */ "__sync_fetch_and_add_8",
+/* SYNC_FETCH_AND_ADD_16 */ "__sync_fetch_and_add_16",
+/* SYNC_FETCH_AND_SUB_1 */ "__sync_fetch_and_sub_1",
+/* SYNC_FETCH_AND_SUB_2 */ "__sync_fetch_and_sub_2",
+/* SYNC_FETCH_AND_SUB_4 */ "__sync_fetch_and_sub_4",
+/* SYNC_FETCH_AND_SUB_8 */ "__sync_fetch_and_sub_8",
+/* SYNC_FETCH_AND_SUB_16 */ "__sync_fetch_and_sub_16",
+/* SYNC_FETCH_AND_AND_1 */ "__sync_fetch_and_and_1",
+/* SYNC_FETCH_AND_AND_2 */ "__sync_fetch_and_and_2",
+/* SYNC_FETCH_AND_AND_4 */ "__sync_fetch_and_and_4",
+/* SYNC_FETCH_AND_AND_8 */ "__sync_fetch_and_and_8",
+/* SYNC_FETCH_AND_AND_16 */ "__sync_fetch_and_and_16",
+/* SYNC_FETCH_AND_OR_1 */ "__sync_fetch_and_or_1",
+/* SYNC_FETCH_AND_OR_2 */ "__sync_fetch_and_or_2",
+/* SYNC_FETCH_AND_OR_4 */ "__sync_fetch_and_or_4",
+/* SYNC_FETCH_AND_OR_8 */ "__sync_fetch_and_or_8",
+/* SYNC_FETCH_AND_OR_16 */ "__sync_fetch_and_or_16",
+/* SYNC_FETCH_AND_XOR_1 */ "__sync_fetch_and_xor_1",
+/* SYNC_FETCH_AND_XOR_2 */ "__sync_fetch_and_xor_2",
+/* SYNC_FETCH_AND_XOR_4 */ "__sync_fetch_and_xor_4",
+/* SYNC_FETCH_AND_XOR_8 */ "__sync_fetch_and_xor_8",
+/* SYNC_FETCH_AND_XOR_16 */ "__sync_fetch_and_xor_16",
+/* SYNC_FETCH_AND_NAND_1 */ "__sync_fetch_and_nand_1",
+/* SYNC_FETCH_AND_NAND_2 */ "__sync_fetch_and_nand_2",
+/* SYNC_FETCH_AND_NAND_4 */ "__sync_fetch_and_nand_4",
+/* SYNC_FETCH_AND_NAND_8 */ "__sync_fetch_and_nand_8",
+/* SYNC_FETCH_AND_NAND_16 */ "__sync_fetch_and_nand_16",
+/* SYNC_FETCH_AND_MAX_1 */ "__sync_fetch_and_max_1",
+/* SYNC_FETCH_AND_MAX_2 */ "__sync_fetch_and_max_2",
+/* SYNC_FETCH_AND_MAX_4 */ "__sync_fetch_and_max_4",
+/* SYNC_FETCH_AND_MAX_8 */ "__sync_fetch_and_max_8",
+/* SYNC_FETCH_AND_MAX_16 */ "__sync_fetch_and_max_16",
+/* SYNC_FETCH_AND_UMAX_1 */ "__sync_fetch_and_umax_1",
+/* SYNC_FETCH_AND_UMAX_2 */ "__sync_fetch_and_umax_2",
+/* SYNC_FETCH_AND_UMAX_4 */ "__sync_fetch_and_umax_4",
+/* SYNC_FETCH_AND_UMAX_8 */ "__sync_fetch_and_umax_8",
+/* SYNC_FETCH_AND_UMAX_16 */ "__sync_fetch_and_umax_16",
+/* SYNC_FETCH_AND_MIN_1 */ "__sync_fetch_and_min_1",
+/* SYNC_FETCH_AND_MIN_2 */ "__sync_fetch_and_min_2",
+/* SYNC_FETCH_AND_MIN_4 */ "__sync_fetch_and_min_4",
+/* SYNC_FETCH_AND_MIN_8 */ "__sync_fetch_and_min_8",
+/* SYNC_FETCH_AND_MIN_16 */ "__sync_fetch_and_min_16",
+/* SYNC_FETCH_AND_UMIN_1 */ "__sync_fetch_and_umin_1",
+/* SYNC_FETCH_AND_UMIN_2 */ "__sync_fetch_and_umin_2",
+/* SYNC_FETCH_AND_UMIN_4 */ "__sync_fetch_and_umin_4",
+/* SYNC_FETCH_AND_UMIN_8 */ "__sync_fetch_and_umin_8",
+/* SYNC_FETCH_AND_UMIN_16 */ "__sync_fetch_and_umin_16",
+
+/* ATOMIC_LOAD */ "__atomic_load",
+/* ATOMIC_LOAD_1 */ "__atomic_load_1",
+/* ATOMIC_LOAD_2 */ "__atomic_load_2",
+/* ATOMIC_LOAD_4 */ "__atomic_load_4",
+/* ATOMIC_LOAD_8 */ "__atomic_load_8",
+/* ATOMIC_LOAD_16 */ "__atomic_load_16",
+
+/* ATOMIC_STORE */ "__atomic_store",
+/* ATOMIC_STORE_1 */ "__atomic_store_1",
+/* ATOMIC_STORE_2 */ "__atomic_store_2",
+/* ATOMIC_STORE_4 */ "__atomic_store_4",
+/* ATOMIC_STORE_8 */ "__atomic_store_8",
+/* ATOMIC_STORE_16 */ "__atomic_store_16",
+
+/* ATOMIC_EXCHANGE */ "__atomic_exchange",
+/* ATOMIC_EXCHANGE_1 */ "__atomic_exchange_1",
+/* ATOMIC_EXCHANGE_2 */ "__atomic_exchange_2",
+/* ATOMIC_EXCHANGE_4 */ "__atomic_exchange_4",
+/* ATOMIC_EXCHANGE_8 */ "__atomic_exchange_8",
+/* ATOMIC_EXCHANGE_16 */ "__atomic_exchange_16",
+
+/* ATOMIC_COMPARE_EXCHANGE */ "__atomic_compare_exchange",
+/* ATOMIC_COMPARE_EXCHANGE_1 */ "__atomic_compare_exchange_1",
+/* ATOMIC_COMPARE_EXCHANGE_2 */ "__atomic_compare_exchange_2",
+/* ATOMIC_COMPARE_EXCHANGE_4 */ "__atomic_compare_exchange_4",
+/* ATOMIC_COMPARE_EXCHANGE_8 */ "__atomic_compare_exchange_8",
+/* ATOMIC_COMPARE_EXCHANGE_16 */ "__atomic_compare_exchange_16",
+
+/* ATOMIC_FETCH_ADD_1 */ "__atomic_fetch_add_1",
+/* ATOMIC_FETCH_ADD_2 */ "__atomic_fetch_add_2",
+/* ATOMIC_FETCH_ADD_4 */ "__atomic_fetch_add_4",
+/* ATOMIC_FETCH_ADD_8 */ "__atomic_fetch_add_8",
+/* ATOMIC_FETCH_ADD_16 */ "__atomic_fetch_add_16",
+/* ATOMIC_FETCH_SUB_1 */ "__atomic_fetch_sub_1",
+/* ATOMIC_FETCH_SUB_2 */ "__atomic_fetch_sub_2",
+/* ATOMIC_FETCH_SUB_4 */ "__atomic_fetch_sub_4",
+/* ATOMIC_FETCH_SUB_8 */ "__atomic_fetch_sub_8",
+/* ATOMIC_FETCH_SUB_16 */ "__atomic_fetch_sub_16",
+/* ATOMIC_FETCH_AND_1 */ "__atomic_fetch_and_1",
+/* ATOMIC_FETCH_AND_2 */ "__atomic_fetch_and_2",
+/* ATOMIC_FETCH_AND_4 */ "__atomic_fetch_and_4",
+/* ATOMIC_FETCH_AND_8 */ "__atomic_fetch_and_8",
+/* ATOMIC_FETCH_AND_16 */ "__atomic_fetch_and_16",
+/* ATOMIC_FETCH_OR_1 */ "__atomic_fetch_or_1",
+/* ATOMIC_FETCH_OR_2 */ "__atomic_fetch_or_2",
+/* ATOMIC_FETCH_OR_4 */ "__atomic_fetch_or_4",
+/* ATOMIC_FETCH_OR_8 */ "__atomic_fetch_or_8",
+/* ATOMIC_FETCH_OR_16 */ "__atomic_fetch_or_16",
+/* ATOMIC_FETCH_XOR_1 */ "__atomic_fetch_xor_1",
+/* ATOMIC_FETCH_XOR_2 */ "__atomic_fetch_xor_2",
+/* ATOMIC_FETCH_XOR_4 */ "__atomic_fetch_xor_4",
+/* ATOMIC_FETCH_XOR_8 */ "__atomic_fetch_xor_8",
+/* ATOMIC_FETCH_XOR_16 */ "__atomic_fetch_xor_16",
+/* ATOMIC_FETCH_NAND_1 */ "__atomic_fetch_nand_1",
+/* ATOMIC_FETCH_NAND_2 */ "__atomic_fetch_nand_2",
+/* ATOMIC_FETCH_NAND_4 */ "__atomic_fetch_nand_4",
+/* ATOMIC_FETCH_NAND_8 */ "__atomic_fetch_nand_8",
+/* ATOMIC_FETCH_NAND_16 */ "__atomic_fetch_nand_16",
+
+/* STACKPROTECTOR_CHECK_FAIL */ "__stack_chk_fail",
+
+/* DEOPTIMIZE */ "__llvm_deoptimize",
+};
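+
+// Note: both tables in this file are indexed by RTLIB::Libcall, so their
+// entries must remain in one-to-one correspondence with the enum order in
+// llvm/CodeGen/RuntimeLibcalls.h.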
+
+void llvm::GetSignature(const WebAssemblySubtarget &Subtarget,
+ RTLIB::Libcall LC, SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params) {
+ assert(Rets.empty());
+ assert(Params.empty());
+
+ WebAssembly::ExprType iPTR = Subtarget.hasAddr64() ?
+ WebAssembly::ExprType::I64 :
+ WebAssembly::ExprType::I32;
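+ // For example, wasm64 targets (hasAddr64) lower iPTR operands as I64, while
+ // wasm32 targets use I32.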
+
+ switch (RuntimeLibcallSignatures[LC]) {
+ case func:
+ break;
+ case f32_func_f32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f32_func_f64:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case f32_func_i32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f32_func_i64:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case f32_func_i16:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f64_func_f32:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f64_func_f64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case f64_func_i32:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f64_func_i64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i32_func_f32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i32_func_f64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i32_func_i32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i64_func_f32:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i64_func_f64:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i64_func_i64:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case f32_func_f32_f32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f32_func_f32_i32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f32_func_i64_i64:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case f64_func_f64_f64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case f64_func_f64_i32:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case f64_func_i64_i64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i16_func_f32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i8_func_i8_i8:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case func_f32_iPTR_iPTR:
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ break;
+ case func_f64_iPTR_iPTR:
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ break;
+ case i16_func_i16_i16:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i32_func_f32_f32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i32_func_f64_f64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i32_func_i32_i32:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i64_func_i64_i64:
+ Rets.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_f32:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case i64_i64_func_f64:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case i16_i16_func_i16_i16:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I32);
+ Rets.push_back(wasm::ValType::I32);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i32_i32_func_i32_i32:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I32);
+ Rets.push_back(wasm::ValType::I32);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case i64_i64_func_i64_i64:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i64_i64_i64_i64:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_i64_i64_func_i64_i64_i64_i64:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i64_i64_func_i64_i64_i32:
+#if 0 // TODO: Enable this when wasm gets multiple-return-value support.
+ Rets.push_back(wasm::ValType::I64);
+ Rets.push_back(wasm::ValType::I64);
+#else
+ Params.push_back(wasm::ValType(iPTR));
+#endif
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case iPTR_func_iPTR_i32_iPTR:
+ Rets.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType(iPTR));
+ break;
+ case iPTR_func_iPTR_iPTR_iPTR:
+ Rets.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ break;
+ case f32_func_f32_f32_f32:
+ Rets.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case f64_func_f64_f64_f64:
+ Rets.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case func_i64_i64_iPTR_iPTR:
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType(iPTR));
+ break;
+ case func_iPTR_f32:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::F32);
+ break;
+ case func_iPTR_f64:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::F64);
+ break;
+ case func_iPTR_i32:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::I32);
+ break;
+ case func_iPTR_i64:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case func_iPTR_i64_i64:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case func_iPTR_i64_i64_i64_i64:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case func_iPTR_i64_i64_i64_i64_i64_i64:
+ Params.push_back(wasm::ValType(iPTR));
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i32_func_i64_i64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case i32_func_i64_i64_i64_i64:
+ Rets.push_back(wasm::ValType::I32);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ Params.push_back(wasm::ValType::I64);
+ break;
+ case unsupported:
+ llvm_unreachable("unsupported runtime library signature");
+ }
+}
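+
+// A hedged usage sketch (illustrative only, not part of this patch): query a
+// libcall's signature and inspect the resulting wasm types. Assuming MEMCPY is
+// declared iPTR_func_iPTR_iPTR_iPTR in the table above, on wasm32 this gives:
+//
+//   SmallVector<wasm::ValType, 1> Rets;
+//   SmallVector<wasm::ValType, 4> Params;
+//   GetSignature(Subtarget, RTLIB::MEMCPY, Rets, Params);
+//   // Rets   == {I32}
+//   // Params == {I32, I32, I32}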
+
+void llvm::GetSignature(const WebAssemblySubtarget &Subtarget, const char *Name,
+ SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params) {
+ assert(strcmp(RuntimeLibcallNames[RTLIB::DEOPTIMIZE], "__llvm_deoptimize") ==
+ 0);
+
+ for (size_t i = 0, e = RTLIB::UNKNOWN_LIBCALL; i < e; ++i)
+ if (RuntimeLibcallNames[i] && strcmp(RuntimeLibcallNames[i], Name) == 0)
+ return GetSignature(Subtarget, RTLIB::Libcall(i), Rets, Params);
+
+ llvm_unreachable("unexpected runtime library name");
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h
new file mode 100644
index 000000000000..129067604784
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRuntimeLibcallSignatures.h
@@ -0,0 +1,37 @@
+// WebAssemblyRuntimeLibcallSignatures.h - Runtime Libcall Signatures -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file provides signature information for runtime libcalls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_RUNTIME_LIBCALL_SIGNATURES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_RUNTIME_LIBCALL_SIGNATURES_H
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+
+namespace llvm {
+
+class WebAssemblySubtarget;
+
+extern void GetSignature(const WebAssemblySubtarget &Subtarget,
+ RTLIB::Libcall LC,
+ SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params);
+
+extern void GetSignature(const WebAssemblySubtarget &Subtarget,
+ const char *Name, SmallVectorImpl<wasm::ValType> &Rets,
+ SmallVectorImpl<wasm::ValType> &Params);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp
new file mode 100644
index 000000000000..fae9c6100510
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.cpp
@@ -0,0 +1,20 @@
+//===-- WebAssemblySelectionDAGInfo.cpp - WebAssembly SelectionDAG Info ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the WebAssemblySelectionDAGInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetMachine.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-selectiondag-info"
+
+WebAssemblySelectionDAGInfo::~WebAssemblySelectionDAGInfo() {}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
new file mode 100644
index 000000000000..533c66b7a22f
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySelectionDAGInfo.h
@@ -0,0 +1,30 @@
+//=- WebAssemblySelectionDAGInfo.h - WebAssembly SelectionDAG Info -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the WebAssembly subclass for
+/// SelectionDAGTargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSELECTIONDAGINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSELECTIONDAGINFO_H
+
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+
+namespace llvm {
+
+class WebAssemblySelectionDAGInfo final : public SelectionDAGTargetInfo {
+public:
+ ~WebAssemblySelectionDAGInfo() override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
new file mode 100644
index 000000000000..c4b9e915b41e
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -0,0 +1,126 @@
+//=- WebAssemblySetP2AlignOperands.cpp - Set alignments on loads and stores -=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file sets the p2align operands on load and store instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-set-p2align-operands"
+
+namespace {
+class WebAssemblySetP2AlignOperands final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblySetP2AlignOperands() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override {
+ return "WebAssembly Set p2align Operands";
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addPreservedID(MachineDominatorsID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+} // end anonymous namespace
+
+char WebAssemblySetP2AlignOperands::ID = 0;
+FunctionPass *llvm::createWebAssemblySetP2AlignOperands() {
+ return new WebAssemblySetP2AlignOperands();
+}
+
+static void RewriteP2Align(MachineInstr &MI, unsigned OperandNo) {
+ assert(MI.getOperand(OperandNo).getImm() == 0 &&
+ "ISel should set p2align operands to 0");
+ assert(MI.hasOneMemOperand() &&
+ "Load and store instructions have exactly one mem operand");
+ assert((*MI.memoperands_begin())->getSize() ==
+ (UINT64_C(1)
+ << WebAssembly::GetDefaultP2Align(MI.getOpcode())) &&
+ "Default p2align value should be natural");
+ assert(MI.getDesc().OpInfo[OperandNo].OperandType ==
+ WebAssembly::OPERAND_P2ALIGN &&
+ "Load and store instructions should have a p2align operand");
+ uint64_t P2Align = Log2_64((*MI.memoperands_begin())->getAlignment());
+
+ // WebAssembly does not currently support supernatural alignment.
+ P2Align = std::min(
+ P2Align, uint64_t(WebAssembly::GetDefaultP2Align(MI.getOpcode())));
+
+ MI.getOperand(OperandNo).setImm(P2Align);
+}
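+
+// Example (hedged): for LOAD_I32 the natural alignment is 4 bytes, so
+// GetDefaultP2Align returns 2. A memory operand aligned to 16 bytes computes
+// Log2_64(16) == 4 but is clamped to p2align 2, while a 1-byte-aligned
+// operand yields p2align 0.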
+
+bool WebAssemblySetP2AlignOperands::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Set p2align Operands **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ bool Changed = false;
+
+ for (auto &MBB : MF) {
+ for (auto &MI : MBB) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::LOAD_I32:
+ case WebAssembly::LOAD_I64:
+ case WebAssembly::LOAD_F32:
+ case WebAssembly::LOAD_F64:
+ case WebAssembly::LOAD8_S_I32:
+ case WebAssembly::LOAD8_U_I32:
+ case WebAssembly::LOAD16_S_I32:
+ case WebAssembly::LOAD16_U_I32:
+ case WebAssembly::LOAD8_S_I64:
+ case WebAssembly::LOAD8_U_I64:
+ case WebAssembly::LOAD16_S_I64:
+ case WebAssembly::LOAD16_U_I64:
+ case WebAssembly::LOAD32_S_I64:
+ case WebAssembly::LOAD32_U_I64:
+ case WebAssembly::ATOMIC_LOAD_I32:
+ case WebAssembly::ATOMIC_LOAD8_U_I32:
+ case WebAssembly::ATOMIC_LOAD16_U_I32:
+ case WebAssembly::ATOMIC_LOAD_I64:
+ case WebAssembly::ATOMIC_LOAD8_U_I64:
+ case WebAssembly::ATOMIC_LOAD16_U_I64:
+ case WebAssembly::ATOMIC_LOAD32_U_I64:
+ RewriteP2Align(MI, WebAssembly::LoadP2AlignOperandNo);
+ break;
+ case WebAssembly::STORE_I32:
+ case WebAssembly::STORE_I64:
+ case WebAssembly::STORE_F32:
+ case WebAssembly::STORE_F64:
+ case WebAssembly::STORE8_I32:
+ case WebAssembly::STORE16_I32:
+ case WebAssembly::STORE8_I64:
+ case WebAssembly::STORE16_I64:
+ case WebAssembly::STORE32_I64:
+ RewriteP2Align(MI, WebAssembly::StoreP2AlignOperandNo);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
new file mode 100644
index 000000000000..22a5a9099e72
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyStoreResults.cpp
@@ -0,0 +1,202 @@
+//===-- WebAssemblyStoreResults.cpp - Optimize using store result values --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements an optimization pass using store result values.
+///
+/// WebAssembly's store instructions return the stored value. This is to enable
+/// an optimization wherein uses of the stored value can be replaced by uses of
+/// the store's result value, making the stored value register more likely to
+/// be single-use, thus more likely to be useful to register stackifying, and
+/// potentially also exposing the store to register stackifying. These both can
+/// reduce get_local/set_local traffic.
+///
+/// This pass also performs this optimization for memcpy, memmove, and memset
+/// calls, since the LLVM intrinsics for these return void and therefore can't
+/// use the 'returned' attribute; consequently they aren't handled by the
+/// OptimizeReturned pass.
+///
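+/// For illustration, a hedged sketch of the rewrite (the virtual register
+/// names are invented for the example):
+///
+///   %v1 = ...                   ; compute the value to store
+///   %v2 = STORE_I32 ..., %v1    ; the store returns the stored value
+///   ... use of %v1 ...          ; before: a dominated use of %v1
+///
+/// The dominated use is rewritten to read %v2 instead, leaving %v1
+/// single-use and therefore a candidate for register stackification.
+///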
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "WebAssemblySubtarget.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-store-results"
+
+namespace {
+class WebAssemblyStoreResults final : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ WebAssemblyStoreResults() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override { return "WebAssembly Store Results"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<MachineBlockFrequencyInfo>();
+ AU.addPreserved<MachineBlockFrequencyInfo>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addPreserved<LiveIntervals>();
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+};
+} // end anonymous namespace
+
+char WebAssemblyStoreResults::ID = 0;
+FunctionPass *llvm::createWebAssemblyStoreResults() {
+ return new WebAssemblyStoreResults();
+}
+
+// Replace uses of FromReg with ToReg if they are dominated by MI.
+static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
+ unsigned FromReg, unsigned ToReg,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT,
+ LiveIntervals &LIS) {
+ bool Changed = false;
+
+ LiveInterval *FromLI = &LIS.getInterval(FromReg);
+ LiveInterval *ToLI = &LIS.getInterval(ToReg);
+
+ SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
+ VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
+
+ SmallVector<SlotIndex, 4> Indices;
+
+ for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end(); I != E;) {
+ MachineOperand &O = *I++;
+ MachineInstr *Where = O.getParent();
+
+ // Check that MI dominates the instruction in the normal way.
+ if (&MI == Where || !MDT.dominates(&MI, Where))
+ continue;
+
+ // If this use gets a different value, skip it.
+ SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
+ VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
+ if (WhereVNI && WhereVNI != FromVNI)
+ continue;
+
+ // Make sure ToReg isn't clobbered before it gets there.
+ VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
+ if (ToVNI && ToVNI != FromVNI)
+ continue;
+
+ Changed = true;
+ DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
+ << MI << "\n");
+ O.setReg(ToReg);
+
+ // If the store's def was previously dead, it is no longer.
+ if (!O.isUndef()) {
+ MI.getOperand(0).setIsDead(false);
+
+ Indices.push_back(WhereIdx.getRegSlot());
+ }
+ }
+
+ if (Changed) {
+ // Extend ToReg's liveness.
+ LIS.extendToIndices(*ToLI, Indices);
+
+ // Shrink FromReg's liveness.
+ LIS.shrinkToUses(FromLI);
+
+ // If we replaced all dominated uses, FromReg is now killed at MI.
+ if (!FromLI->liveAt(FromIdx.getDeadSlot()))
+ MI.addRegisterKilled(FromReg,
+ MBB.getParent()->getSubtarget<WebAssemblySubtarget>()
+ .getRegisterInfo());
+ }
+
+ return Changed;
+}
+
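+// A hedged sketch of the call pattern optimizeCall matches below (register
+// names invented; operand 0 is the result, operand 1 the callee symbol,
+// operand 2 the destination pointer):
+//
+//   %ret = CALL_I32 &memcpy, %dst, %src, %n
+//
+// Since memcpy returns its first argument, uses of %dst dominated by the call
+// can be rewritten to use %ret via ReplaceDominatedUses.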
+static bool optimizeCall(MachineBasicBlock &MBB, MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ MachineDominatorTree &MDT,
+ LiveIntervals &LIS,
+ const WebAssemblyTargetLowering &TLI,
+ const TargetLibraryInfo &LibInfo) {
+ MachineOperand &Op1 = MI.getOperand(1);
+ if (!Op1.isSymbol())
+ return false;
+
+ StringRef Name(Op1.getSymbolName());
+ bool callReturnsInput = Name == TLI.getLibcallName(RTLIB::MEMCPY) ||
+ Name == TLI.getLibcallName(RTLIB::MEMMOVE) ||
+ Name == TLI.getLibcallName(RTLIB::MEMSET);
+ if (!callReturnsInput)
+ return false;
+
+ LibFunc Func;
+ if (!LibInfo.getLibFunc(Name, Func))
+ return false;
+
+ unsigned FromReg = MI.getOperand(2).getReg();
+ unsigned ToReg = MI.getOperand(0).getReg();
+ if (MRI.getRegClass(FromReg) != MRI.getRegClass(ToReg))
+ report_fatal_error("Store results: call to builtin function with wrong "
+ "signature, from/to mismatch");
+ return ReplaceDominatedUses(MBB, MI, FromReg, ToReg, MRI, MDT, LIS);
+}
+
+bool WebAssemblyStoreResults::runOnMachineFunction(MachineFunction &MF) {
+ DEBUG({
+ dbgs() << "********** Store Results **********\n"
+ << "********** Function: " << MF.getName() << '\n';
+ });
+
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+ const WebAssemblyTargetLowering &TLI =
+ *MF.getSubtarget<WebAssemblySubtarget>().getTargetLowering();
+ const auto &LibInfo = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+ bool Changed = false;
+
+ // We don't preserve SSA form.
+ MRI.leaveSSA();
+
+ assert(MRI.tracksLiveness() && "StoreResults expects liveness tracking");
+
+ for (auto &MBB : MF) {
+ DEBUG(dbgs() << "Basic Block: " << MBB.getName() << '\n');
+ for (auto &MI : MBB)
+ switch (MI.getOpcode()) {
+ default:
+ break;
+ case WebAssembly::CALL_I32:
+ case WebAssembly::CALL_I64:
+ Changed |= optimizeCall(MBB, MI, MRI, MDT, LIS, TLI, LibInfo);
+ break;
+ }
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
new file mode 100644
index 000000000000..9e122a5f1574
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.cpp
@@ -0,0 +1,56 @@
+//===-- WebAssemblySubtarget.cpp - WebAssembly Subtarget Information ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements the WebAssembly-specific subclass of
+/// TargetSubtarget.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblySubtarget.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyInstrInfo.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm-subtarget"
+
+#define GET_SUBTARGETINFO_CTOR
+#define GET_SUBTARGETINFO_TARGET_DESC
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+WebAssemblySubtarget &
+WebAssemblySubtarget::initializeSubtargetDependencies(StringRef FS) {
+ // Determine default and user-specified characteristics
+
+ if (CPUString.empty())
+ CPUString = "generic";
+
+ ParseSubtargetFeatures(CPUString, FS);
+ return *this;
+}
+
+WebAssemblySubtarget::WebAssemblySubtarget(const Triple &TT,
+ const std::string &CPU,
+ const std::string &FS,
+ const TargetMachine &TM)
+ : WebAssemblyGenSubtargetInfo(TT, CPU, FS), HasSIMD128(false),
+ HasAtomics(false), HasNontrappingFPToInt(false), CPUString(CPU),
+ TargetTriple(TT), FrameLowering(),
+ InstrInfo(initializeSubtargetDependencies(FS)), TSInfo(),
+ TLInfo(TM, *this) {}
+
+bool WebAssemblySubtarget::enableMachineScheduler() const {
+ // Disable the MachineScheduler for now. Even with ShouldTrackPressure set and
+ // enableMachineSchedDefaultSched overridden, it appears to have an overall
+ // negative effect for the kinds of register optimizations we're doing.
+ return false;
+}
+
+bool WebAssemblySubtarget::useAA() const { return true; }
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
new file mode 100644
index 000000000000..a6bf0b6d54f6
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblySubtarget.h
@@ -0,0 +1,89 @@
+//=- WebAssemblySubtarget.h - Define Subtarget for the WebAssembly -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares the WebAssembly-specific subclass of
+/// TargetSubtarget.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSUBTARGET_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYSUBTARGET_H
+
+#include "WebAssemblyFrameLowering.h"
+#include "WebAssemblyISelLowering.h"
+#include "WebAssemblyInstrInfo.h"
+#include "WebAssemblySelectionDAGInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "WebAssemblyGenSubtargetInfo.inc"
+
+namespace llvm {
+
+class WebAssemblySubtarget final : public WebAssemblyGenSubtargetInfo {
+ bool HasSIMD128;
+ bool HasAtomics;
+ bool HasNontrappingFPToInt;
+
+ /// String name of used CPU.
+ std::string CPUString;
+
+ /// What processor and OS we're targeting.
+ Triple TargetTriple;
+
+ WebAssemblyFrameLowering FrameLowering;
+ WebAssemblyInstrInfo InstrInfo;
+ WebAssemblySelectionDAGInfo TSInfo;
+ WebAssemblyTargetLowering TLInfo;
+
+ /// Initializes using CPUString and the passed-in feature string so that we
+ /// can use initializer lists for subtarget initialization.
+ WebAssemblySubtarget &initializeSubtargetDependencies(StringRef FS);
+
+public:
+ /// This constructor initializes the data members to match that
+ /// of the specified triple.
+ WebAssemblySubtarget(const Triple &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM);
+
+ const WebAssemblySelectionDAGInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+ const WebAssemblyFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+ const WebAssemblyTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+ const WebAssemblyInstrInfo *getInstrInfo() const override {
+ return &InstrInfo;
+ }
+ const WebAssemblyRegisterInfo *getRegisterInfo() const override {
+ return &getInstrInfo()->getRegisterInfo();
+ }
+ const Triple &getTargetTriple() const { return TargetTriple; }
+ bool enableMachineScheduler() const override;
+ bool useAA() const override;
+
+ // Predicates used by WebAssemblyInstrInfo.td.
+ bool hasAddr64() const { return TargetTriple.isArch64Bit(); }
+ bool hasSIMD128() const { return HasSIMD128; }
+ bool hasAtomics() const { return HasAtomics; }
+ bool hasNontrappingFPToInt() const { return HasNontrappingFPToInt; }
+
+ /// Parses a feature string, setting the specified subtarget options. The
+ /// definition of this function is auto-generated by tblgen.
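+ /// For example (hedged), a feature string such as "+simd128,+atomics" would
+ /// set HasSIMD128 and HasAtomics here.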
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
new file mode 100644
index 000000000000..d38cde74d2ec
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp
@@ -0,0 +1,301 @@
+//===- WebAssemblyTargetMachine.cpp - Define TargetMachine for WebAssembly -==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the WebAssembly-specific subclass of TargetMachine.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetMachine.h"
+#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssembly.h"
+#include "WebAssemblyTargetObjectFile.h"
+#include "WebAssemblyTargetTransformInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Scalar.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasm"
+
+// Emscripten's asm.js-style exception handling
+static cl::opt<bool> EnableEmException(
+ "enable-emscripten-cxx-exceptions",
+ cl::desc("WebAssembly Emscripten-style exception handling"),
+ cl::init(false));
+
+// Emscripten's asm.js-style setjmp/longjmp handling
+static cl::opt<bool> EnableEmSjLj(
+ "enable-emscripten-sjlj",
+ cl::desc("WebAssembly Emscripten-style setjmp/longjmp handling"),
+ cl::init(false));
+
+extern "C" void LLVMInitializeWebAssemblyTarget() {
+ // Register the target.
+ RegisterTargetMachine<WebAssemblyTargetMachine> X(
+ getTheWebAssemblyTarget32());
+ RegisterTargetMachine<WebAssemblyTargetMachine> Y(
+ getTheWebAssemblyTarget64());
+
+ // Register exception handling pass to opt
+ initializeWebAssemblyLowerEmscriptenEHSjLjPass(
+ *PassRegistry::getPassRegistry());
+}
+
+//===----------------------------------------------------------------------===//
+// WebAssembly Lowering public interface.
+//===----------------------------------------------------------------------===//
+
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::PIC_;
+ return *RM;
+}
+
+/// Create a WebAssembly architecture model.
+///
+WebAssemblyTargetMachine::WebAssemblyTargetMachine(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
+ : LLVMTargetMachine(T,
+ TT.isArch64Bit() ? "e-m:e-p:64:64-i64:64-n32:64-S128"
+ : "e-m:e-p:32:32-i64:64-n32:64-S128",
+ TT, CPU, FS, Options, getEffectiveRelocModel(RM),
+ CM ? *CM : CodeModel::Large, OL),
+ TLOF(TT.isOSBinFormatELF() ?
+ static_cast<TargetLoweringObjectFile*>(
+ new WebAssemblyTargetObjectFileELF()) :
+ static_cast<TargetLoweringObjectFile*>(
+ new WebAssemblyTargetObjectFile())) {
+ // WebAssembly type-checks instructions, but a noreturn function with a return
+ // type that doesn't match the context will cause a check failure. So we lower
+ // LLVM 'unreachable' to ISD::TRAP and then lower that to WebAssembly's
+ // 'unreachable' instruction, which is meant for that case.
+ this->Options.TrapUnreachable = true;
+
+ // WebAssembly treats each function as an independent unit. Force
+ // -ffunction-sections, effectively, so that we can emit them independently.
+ if (!TT.isOSBinFormatELF()) {
+ this->Options.FunctionSections = true;
+ this->Options.DataSections = true;
+ this->Options.UniqueSectionNames = true;
+ }
+
+ initAsmInfo();
+
+ // Note that we don't use setRequiresStructuredCFG(true). It would disable
+ // optimizations that we're OK with, and want, such as critical-edge
+ // splitting and tail merging.
+}
+
+WebAssemblyTargetMachine::~WebAssemblyTargetMachine() {}
+
+const WebAssemblySubtarget *
+WebAssemblyTargetMachine::getSubtargetImpl(const Function &F) const {
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
+
+ std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString().str()
+ : TargetCPU;
+ std::string FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString().str()
+ : TargetFS;
+
+ auto &I = SubtargetMap[CPU + FS];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ I = llvm::make_unique<WebAssemblySubtarget>(TargetTriple, CPU, FS, *this);
+ }
+ return I.get();
+}
+
+namespace {
+/// WebAssembly Code Generator Pass Configuration Options.
+class WebAssemblyPassConfig final : public TargetPassConfig {
+public:
+ WebAssemblyPassConfig(WebAssemblyTargetMachine &TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ WebAssemblyTargetMachine &getWebAssemblyTargetMachine() const {
+ return getTM<WebAssemblyTargetMachine>();
+ }
+
+ FunctionPass *createTargetRegisterAllocator(bool) override;
+
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ void addPostRegAlloc() override;
+ bool addGCPasses() override { return false; }
+ void addPreEmitPass() override;
+};
+} // end anonymous namespace
+
+TargetTransformInfo
+WebAssemblyTargetMachine::getTargetTransformInfo(const Function &F) {
+ return TargetTransformInfo(WebAssemblyTTIImpl(this, F));
+}
+
+TargetPassConfig *
+WebAssemblyTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new WebAssemblyPassConfig(*this, PM);
+}
+
+FunctionPass *WebAssemblyPassConfig::createTargetRegisterAllocator(bool) {
+ return nullptr; // No reg alloc
+}
+
+//===----------------------------------------------------------------------===//
+// The following functions are called from lib/CodeGen/Passes.cpp to modify
+// the CodeGen pass sequence.
+//===----------------------------------------------------------------------===//
+
+void WebAssemblyPassConfig::addIRPasses() {
+ if (TM->Options.ThreadModel == ThreadModel::Single)
+ // In "single" mode, atomics get lowered to non-atomics.
+ addPass(createLowerAtomicPass());
+ else
+ // Expand some atomic operations. WebAssemblyTargetLowering has hooks which
+ // control specifically what gets lowered.
+ addPass(createAtomicExpandPass());
+
+ // Lower @llvm.global_dtors into @llvm.global_ctors with __cxa_atexit calls.
+ addPass(createWebAssemblyLowerGlobalDtors());
+
+ // Fix function bitcasts, as WebAssembly requires caller and callee signatures
+ // to match.
+ addPass(createWebAssemblyFixFunctionBitcasts());
+
+ // Optimize "returned" function attributes.
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createWebAssemblyOptimizeReturned());
+
+ // If exception handling is not enabled and setjmp/longjmp handling is
+ // enabled, we lower invokes into calls and delete unreachable landingpad
+ // blocks. Lowering invokes when there is no EH support is done in
+ // TargetPassConfig::addPassesToHandleExceptions, but this runs after this
+ // function and SjLj handling expects all invokes to be lowered before.
+ if (!EnableEmException) {
+ addPass(createLowerInvokePass());
+ // The lower invoke pass may create unreachable code. Remove it so that
+ // setjmp/longjmp handling doesn't have to process dead blocks.
+ addPass(createUnreachableBlockEliminationPass());
+ }
+
+ // Handle exceptions and setjmp/longjmp if enabled.
+ if (EnableEmException || EnableEmSjLj)
+ addPass(createWebAssemblyLowerEmscriptenEHSjLj(EnableEmException,
+ EnableEmSjLj));
+
+ TargetPassConfig::addIRPasses();
+}
+
+bool WebAssemblyPassConfig::addInstSelector() {
+ (void)TargetPassConfig::addInstSelector();
+ addPass(
+ createWebAssemblyISelDag(getWebAssemblyTargetMachine(), getOptLevel()));
+ // Run the argument-move pass immediately after the ScheduleDAG scheduler
+ // so that we can fix up the ARGUMENT instructions before anything else
+ // sees them in the wrong place.
+ addPass(createWebAssemblyArgumentMove());
+  // Set the p2align operands. This information is present during ISel, but
+  // it's inconvenient to collect there. Collect it now, and update the
+  // immediate operands.
+ addPass(createWebAssemblySetP2AlignOperands());
+ return false;
+}
+
+void WebAssemblyPassConfig::addPostRegAlloc() {
+ // TODO: The following CodeGen passes don't currently support code containing
+ // virtual registers. Consider removing their restrictions and re-enabling
+ // them.
+
+ // Has no asserts of its own, but was not written to handle virtual regs.
+ disablePass(&ShrinkWrapID);
+
+ // These functions all require the NoVRegs property.
+ disablePass(&MachineCopyPropagationID);
+ disablePass(&PostRASchedulerID);
+ disablePass(&FuncletLayoutID);
+ disablePass(&StackMapLivenessID);
+ disablePass(&LiveDebugValuesID);
+ disablePass(&PatchableFunctionID);
+
+ TargetPassConfig::addPostRegAlloc();
+}
+
+void WebAssemblyPassConfig::addPreEmitPass() {
+ TargetPassConfig::addPreEmitPass();
+
+ // Now that we have a prologue and epilogue and all frame indices are
+ // rewritten, eliminate SP and FP. This allows them to be stackified,
+ // colored, and numbered with the rest of the registers.
+ addPass(createWebAssemblyReplacePhysRegs());
+
+ // Rewrite pseudo call_indirect instructions as real instructions.
+ // This needs to run before register stackification, because we change the
+ // order of the arguments.
+ addPass(createWebAssemblyCallIndirectFixup());
+
+ if (getOptLevel() != CodeGenOpt::None) {
+ // LiveIntervals isn't commonly run this late. Re-establish preconditions.
+ addPass(createWebAssemblyPrepareForLiveIntervals());
+
+ // Depend on LiveIntervals and perform some optimizations on it.
+ addPass(createWebAssemblyOptimizeLiveIntervals());
+
+ // Prepare store instructions for register stackifying.
+ addPass(createWebAssemblyStoreResults());
+
+ // Mark registers as representing wasm's value stack. This is a key
+ // code-compression technique in WebAssembly. We run this pass (and
+ // StoreResults above) very late, so that it sees as much code as possible,
+ // including code emitted by PEI and expanded by late tail duplication.
+ addPass(createWebAssemblyRegStackify());
+
+ // Run the register coloring pass to reduce the total number of registers.
+ // This runs after stackification so that it doesn't consider registers
+ // that become stackified.
+ addPass(createWebAssemblyRegColoring());
+ }
+
+ // Eliminate multiple-entry loops. Do this before inserting explicit get_local
+ // and set_local operators because we create a new variable that we want
+ // converted into a local.
+ addPass(createWebAssemblyFixIrreducibleControlFlow());
+
+ // Insert explicit get_local and set_local operators.
+ addPass(createWebAssemblyExplicitLocals());
+
+ // Sort the blocks of the CFG into topological order, a prerequisite for
+ // BLOCK and LOOP markers.
+ addPass(createWebAssemblyCFGSort());
+
+ // Insert BLOCK and LOOP markers.
+ addPass(createWebAssemblyCFGStackify());
+
+ // Lower br_unless into br_if.
+ addPass(createWebAssemblyLowerBrUnless());
+
+ // Perform the very last peephole optimizations on the code.
+ if (getOptLevel() != CodeGenOpt::None)
+ addPass(createWebAssemblyPeephole());
+
+ // Create a mapping from LLVM CodeGen virtual registers to wasm registers.
+ addPass(createWebAssemblyRegNumbering());
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
new file mode 100644
index 000000000000..dd826befd117
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetMachine.h
@@ -0,0 +1,53 @@
+// WebAssemblyTargetMachine.h - Define TargetMachine for WebAssembly -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares the WebAssembly-specific subclass of
+/// TargetMachine.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETMACHINE_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETMACHINE_H
+
+#include "WebAssemblySubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class WebAssemblyTargetMachine final : public LLVMTargetMachine {
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ mutable StringMap<std::unique_ptr<WebAssemblySubtarget>> SubtargetMap;
+
+public:
+ WebAssemblyTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM, CodeGenOpt::Level OL,
+ bool JIT);
+
+ ~WebAssemblyTargetMachine() override;
+ const WebAssemblySubtarget *
+ getSubtargetImpl(const Function &F) const override;
+
+ // Pass Pipeline Configuration
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+
+ TargetTransformInfo getTargetTransformInfo(const Function &F) override;
+
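+  // Prologue/epilogue insertion must not assume physical registers, since
+  // no physical register allocator is ever run (see
+  // WebAssemblyPassConfig::createTargetRegisterAllocator).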
+ bool usesPhysRegsForPEI() const override { return false; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp
new file mode 100644
index 000000000000..b1fd108bc249
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.cpp
@@ -0,0 +1,30 @@
+//===-- WebAssemblyTargetObjectFile.cpp - WebAssembly Object Info ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the functions of the WebAssembly-specific subclass
+/// of TargetLoweringObjectFile.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetObjectFile.h"
+#include "WebAssemblyTargetMachine.h"
+using namespace llvm;
+
+void WebAssemblyTargetObjectFileELF::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
+}
+
+void WebAssemblyTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileWasm::Initialize(Ctx, TM);
+ InitializeWasm();
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h
new file mode 100644
index 000000000000..ace87c9e442f
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetObjectFile.h
@@ -0,0 +1,36 @@
+//===-- WebAssemblyTargetObjectFile.h - WebAssembly Object Info -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares the WebAssembly-specific subclass of
+/// TargetLoweringObjectFile.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+
+class WebAssemblyTargetObjectFileELF final
+ : public TargetLoweringObjectFileELF {
+public:
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+};
+
+class WebAssemblyTargetObjectFile final : public TargetLoweringObjectFileWasm {
+public:
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
new file mode 100644
index 000000000000..2e002781f43d
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -0,0 +1,83 @@
+//===-- WebAssemblyTargetTransformInfo.cpp - WebAssembly-specific TTI -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines the WebAssembly-specific TargetTransformInfo
+/// implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyTargetTransformInfo.h"
+#include "llvm/CodeGen/CostTable.h"
+#include "llvm/Support/Debug.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "wasmtti"
+
+TargetTransformInfo::PopcntSupportKind
+WebAssemblyTTIImpl::getPopcntSupport(unsigned TyWidth) const {
+ assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
+ return TargetTransformInfo::PSK_FastHardware;
+}
+
+unsigned WebAssemblyTTIImpl::getNumberOfRegisters(bool Vector) {
+ unsigned Result = BaseT::getNumberOfRegisters(Vector);
+
+ // For SIMD, use at least 16 registers, as a rough guess.
+ if (Vector)
+ Result = std::max(Result, 16u);
+
+ return Result;
+}
+
+unsigned WebAssemblyTTIImpl::getRegisterBitWidth(bool Vector) const {
+ if (Vector && getST()->hasSIMD128())
+ return 128;
+
+ return 64;
+}
+
+unsigned WebAssemblyTTIImpl::getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
+ TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
+
+ unsigned Cost = BasicTTIImplBase<WebAssemblyTTIImpl>::getArithmeticInstrCost(
+ Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ switch (Opcode) {
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::Shl:
+      // SIMD128's shifts currently only accept a scalar shift count. For each
+      // element, we'll need to extract, op, insert. The following is a rough
+      // approximation.
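+      // For example, a <4 x i32> shift by a non-uniform amount is charged
+      // 4 * (TCC_Basic + scalar shift cost + TCC_Basic): one extract, one
+      // scalar shift, and one insert per element.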
+ if (Opd2Info != TTI::OK_UniformValue &&
+ Opd2Info != TTI::OK_UniformConstantValue)
+ Cost = VTy->getNumElements() *
+ (TargetTransformInfo::TCC_Basic +
+ getArithmeticInstrCost(Opcode, VTy->getElementType()) +
+ TargetTransformInfo::TCC_Basic);
+ break;
+ }
+ }
+ return Cost;
+}
+
+unsigned WebAssemblyTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) {
+ unsigned Cost = BasicTTIImplBase::getVectorInstrCost(Opcode, Val, Index);
+
+ // SIMD128's insert/extract currently only take constant indices.
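+  // Callers conventionally pass -1u when the index operand is not a
+  // compile-time constant, so charge a large flat penalty for that case.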
+ if (Index == -1u)
+ return Cost + 25 * TargetTransformInfo::TCC_Expensive;
+
+ return Cost;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
new file mode 100644
index 000000000000..7b35fc916133
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -0,0 +1,73 @@
+//==- WebAssemblyTargetTransformInfo.h - WebAssembly-specific TTI -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares a TargetTransformInfo::Concept conforming object
+/// specific to the WebAssembly target machine.
+///
+/// It uses the target's detailed information to provide more precise answers to
+/// certain TTI queries, while letting the target independent and default TTI
+/// implementations handle the rest.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETTRANSFORMINFO_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYTARGETTRANSFORMINFO_H
+
+#include "WebAssemblyTargetMachine.h"
+#include "llvm/CodeGen/BasicTTIImpl.h"
+#include <algorithm>
+
+namespace llvm {
+
+class WebAssemblyTTIImpl final : public BasicTTIImplBase<WebAssemblyTTIImpl> {
+ typedef BasicTTIImplBase<WebAssemblyTTIImpl> BaseT;
+ typedef TargetTransformInfo TTI;
+ friend BaseT;
+
+ const WebAssemblySubtarget *ST;
+ const WebAssemblyTargetLowering *TLI;
+
+ const WebAssemblySubtarget *getST() const { return ST; }
+ const WebAssemblyTargetLowering *getTLI() const { return TLI; }
+
+public:
+ WebAssemblyTTIImpl(const WebAssemblyTargetMachine *TM, const Function &F)
+ : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
+ TLI(ST->getTargetLowering()) {}
+
+ /// \name Scalar TTI Implementations
+ /// @{
+
+ // TODO: Implement more Scalar TTI for WebAssembly
+
+ TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector);
+ unsigned getRegisterBitWidth(bool Vector) const;
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
+ ArrayRef<const Value *> Args = ArrayRef<const Value *>());
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
new file mode 100644
index 000000000000..e32772d491cf
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.cpp
@@ -0,0 +1,97 @@
+//===-- WebAssemblyUtilities.cpp - WebAssembly Utility Functions ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements several utility functions for WebAssembly.
+///
+//===----------------------------------------------------------------------===//
+
+#include "WebAssemblyUtilities.h"
+#include "WebAssemblyMachineFunctionInfo.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+using namespace llvm;
+
+bool WebAssembly::isArgument(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::ARGUMENT_I32:
+ case WebAssembly::ARGUMENT_I64:
+ case WebAssembly::ARGUMENT_F32:
+ case WebAssembly::ARGUMENT_F64:
+ case WebAssembly::ARGUMENT_v16i8:
+ case WebAssembly::ARGUMENT_v8i16:
+ case WebAssembly::ARGUMENT_v4i32:
+ case WebAssembly::ARGUMENT_v4f32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool WebAssembly::isCopy(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::COPY_I32:
+ case WebAssembly::COPY_I64:
+ case WebAssembly::COPY_F32:
+ case WebAssembly::COPY_F64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool WebAssembly::isTee(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::TEE_I32:
+ case WebAssembly::TEE_I64:
+ case WebAssembly::TEE_F32:
+ case WebAssembly::TEE_F64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// Test whether MI is a child of some other node in an expression tree.
+bool WebAssembly::isChild(const MachineInstr &MI,
+ const WebAssemblyFunctionInfo &MFI) {
+ if (MI.getNumOperands() == 0)
+ return false;
+ const MachineOperand &MO = MI.getOperand(0);
+ if (!MO.isReg() || MO.isImplicit() || !MO.isDef())
+ return false;
+ unsigned Reg = MO.getReg();
+ return TargetRegisterInfo::isVirtualRegister(Reg) &&
+ MFI.isVRegStackified(Reg);
+}
+
+bool WebAssembly::isCallIndirect(const MachineInstr &MI) {
+ switch (MI.getOpcode()) {
+ case WebAssembly::CALL_INDIRECT_VOID:
+ case WebAssembly::CALL_INDIRECT_I32:
+ case WebAssembly::CALL_INDIRECT_I64:
+ case WebAssembly::CALL_INDIRECT_F32:
+ case WebAssembly::CALL_INDIRECT_F64:
+ case WebAssembly::CALL_INDIRECT_v16i8:
+ case WebAssembly::CALL_INDIRECT_v8i16:
+ case WebAssembly::CALL_INDIRECT_v4i32:
+ case WebAssembly::CALL_INDIRECT_v4f32:
+ return true;
+ default:
+ return false;
+ }
+}
+
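+/// Return the loop block with the highest machine-basic-block number: e.g.,
+/// for a discontiguous loop covering blocks {2, 3, 7} with 4-6 outside the
+/// loop, this yields block 7, unlike MachineLoop::getBottomBlock, which
+/// assumes the loop's blocks are laid out contiguously.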
+MachineBasicBlock *llvm::LoopBottom(const MachineLoop *Loop) {
+ MachineBasicBlock *Bottom = Loop->getHeader();
+ for (MachineBasicBlock *MBB : Loop->blocks())
+ if (MBB->getNumber() > Bottom->getNumber())
+ Bottom = MBB;
+ return Bottom;
+}
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h
new file mode 100644
index 000000000000..595491f1bf5b
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyUtilities.h
@@ -0,0 +1,43 @@
+//===-- WebAssemblyUtilities.h - WebAssembly Utility Functions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the declaration of the WebAssembly-specific
+/// utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYUTILITIES_H
+#define LLVM_LIB_TARGET_WEBASSEMBLY_WEBASSEMBLYUTILITIES_H
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineInstr;
+class MachineLoop;
+class WebAssemblyFunctionInfo;
+
+namespace WebAssembly {
+
+bool isArgument(const MachineInstr &MI);
+bool isCopy(const MachineInstr &MI);
+bool isTee(const MachineInstr &MI);
+bool isChild(const MachineInstr &MI, const WebAssemblyFunctionInfo &MFI);
+bool isCallIndirect(const MachineInstr &MI);
+
+} // end namespace WebAssembly
+
+/// Return the "bottom" block of a loop. This differs from
+/// MachineLoop::getBottomBlock in that it works even if the loop is
+/// discontiguous.
+MachineBasicBlock *LoopBottom(const MachineLoop *Loop);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt
new file mode 100644
index 000000000000..2eb73befc50b
--- /dev/null
+++ b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt
@@ -0,0 +1,81 @@
+# Tests which are known to fail from the GCC torture test suite.
+
+# Syntax: Each line has a single test to be marked as a 'known failure' (or
+# 'exclusion'). Known failures are expected to fail, and will cause an error
+# if they pass. (Known failures that do not run at all will not cause an
+# error.) The format is
+# <name> <attributes> # comment
+#
+# The attributes in this case represent the different arguments passed to the
+# compiler: 'wasm-s' is for compiling to .s files, and 'wasm-o' is for
+# compiling to wasm object files (.o).
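+#
+# For example, the entry '20071220-1.c wasm-o,O0' below marks that test as
+# an expected failure only when compiled to a wasm object file at -O0.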
+
+# Computed gotos are not supported (Cannot select BlockAddress/BRIND)
+20071220-1.c wasm-o,O0
+20040302-1.c
+20041214-1.c O0
+20071210-1.c
+20071220-1.c wasm-s,O0
+920501-4.c
+920501-5.c
+comp-goto-1.c
+980526-1.c
+990208-1.c
+
+# WebAssembly hasn't implemented __builtin_return_address (and may never).
+20010122-1.c
+20030323-1.c
+20030811-1.c
+pr17377.c
+
+# Error: invalid output constraint '=t' in asm.
+990413-2.c
+
+# Error: __builtin_setjmp / __builtin_longjmp is not supported for the current target.
+built-in-setjmp.c
+pr60003.c
+
+# Error in the program / unsupported by Clang.
+20000822-1.c
+20010209-1.c
+20010605-1.c
+20030501-1.c
+20040520-1.c
+20061220-1.c
+20090219-1.c
+920415-1.c
+920428-2.c
+920501-7.c
+920612-2.c
+920721-4.c
+921017-1.c
+921215-1.c
+931002-1.c
+comp-goto-2.c
+nest-align-1.c
+nest-stdar-1.c
+nestfunc-1.c
+nestfunc-2.c
+nestfunc-3.c
+nestfunc-5.c
+nestfunc-6.c
+nestfunc-7.c
+pr22061-3.c
+pr22061-4.c
+pr24135.c
+pr51447.c
+20020412-1.c
+20040308-1.c
+20040423-1.c
+20041218-2.c
+20070919-1.c
+align-nest.c
+pr41935.c
+920302-1.c
+920501-3.c
+920728-1.c
+pr28865.c
+widechar-2.c
+
+# Untriaged: Assertion failure in WasmObjectWriter::applyRelocations
+20071220-2.c wasm-o,O0