author    | Dimitry Andric <dim@FreeBSD.org> | 2022-12-04 13:14:02 +0000
committer | Dimitry Andric <dim@FreeBSD.org> | 2022-12-04 13:14:02 +0000
commit    | 458532c2dd24708181cdce77b8f60938fdc90dc6 (patch)
tree      | 2fe1fc91fff79aa8d0c52c1c486aa89deadc3d67
parent    | dafdd7863e9eeed3a714329b8758451cbcfc33bf (diff)
Vendor import of llvm-project branch release/15.x llvmorg-15.0.6-0-g088f33605d8a
(vendor/llvm-project/llvmorg-15.0.6-0-g088f33605d8a)
39 files changed, 350 insertions, 170 deletions
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index 36e10e4df4c1..44743fa0206f 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -7047,10 +7047,10 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
 
   // Empty records are ignored for parameter passing purposes.
   if (isEmptyRecord(getContext(), Ty, true)) {
-    Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
-                           getVAListElementType(CGF), SlotSize);
-    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
-    return Addr;
+    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
+    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
+    Address Addr = Address(Load, CGF.Int8Ty, SlotSize);
+    return CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
   }
 
   CharUnits TySize = getContext().getTypeSizeInChars(Ty);
diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index f203cae1d329..665cdc3132fb 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -2139,31 +2139,21 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
   // and gcc-toolsets.
   if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux &&
       D.getVFS().exists("/opt/rh")) {
-    // Find the directory in /opt/rh/ starting with gcc-toolset-* or
-    // devtoolset-* with the highest version number and add that
-    // one to our prefixes.
-    std::string ChosenToolsetDir;
-    unsigned ChosenToolsetVersion = 0;
-    std::error_code EC;
-    for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin("/opt/rh", EC),
-                                       LE;
-         !EC && LI != LE; LI = LI.increment(EC)) {
-      StringRef ToolsetDir = llvm::sys::path::filename(LI->path());
-      unsigned ToolsetVersion;
-      if ((!ToolsetDir.startswith("gcc-toolset-") &&
-           !ToolsetDir.startswith("devtoolset-")) ||
-          ToolsetDir.substr(ToolsetDir.rfind('-') + 1)
-              .getAsInteger(10, ToolsetVersion))
-        continue;
-
-      if (ToolsetVersion > ChosenToolsetVersion) {
-        ChosenToolsetVersion = ToolsetVersion;
-        ChosenToolsetDir = "/opt/rh/" + ToolsetDir.str();
-      }
-    }
-
-    if (ChosenToolsetVersion > 0)
-      Prefixes.push_back(ChosenToolsetDir + "/root/usr");
+    // TODO: We may want to remove this, since the functionality
+    // can be achieved using config files.
+    Prefixes.push_back("/opt/rh/gcc-toolset-12/root/usr");
+    Prefixes.push_back("/opt/rh/gcc-toolset-11/root/usr");
+    Prefixes.push_back("/opt/rh/gcc-toolset-10/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-12/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-11/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-10/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-9/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-4/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-3/root/usr");
+    Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
   }
 
   // Fall back to /usr which is used by most non-Solaris systems.
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 68158ec977cf..5d0d87fd2422 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -939,11 +939,9 @@ bool Sema::LookupBuiltin(LookupResult &R) {
 
       // If this is a builtin on this (or all) targets, create the decl.
      if (unsigned BuiltinID = II->getBuiltinID()) {
-        // In C++, C2x, and OpenCL (spec v1.2 s6.9.f), we don't have any
-        // predefined library functions like 'malloc'. Instead, we'll just
-        // error.
-        if ((getLangOpts().CPlusPlus || getLangOpts().OpenCL ||
-             getLangOpts().C2x) &&
+        // In C++ and OpenCL (spec v1.2 s6.9.f), we don't have any predefined
+        // library functions like 'malloc'. Instead, we'll just error.
+        if ((getLangOpts().CPlusPlus || getLangOpts().OpenCL) &&
            Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
          return false;
 
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 3ab5d26a9a75..edcac4d2ee9a 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -6443,6 +6443,9 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
       CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
     }
 
+    while (BTFTagAttributedTypeLoc TL = CurrTL.getAs<BTFTagAttributedTypeLoc>())
+      CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+
     while (DependentAddressSpaceTypeLoc TL =
                CurrTL.getAs<DependentAddressSpaceTypeLoc>()) {
       fillDependentAddressSpaceTypeLoc(TL, D.getTypeObject(i).getAttrs());
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 19149d079822..ab65612bce90 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -2839,6 +2839,12 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
     return;
   }
 
+  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
+    // FIXME: We should meaningfully implement this.
+    (void)TPO;
+    return;
+  }
+
   llvm_unreachable("Support for this Decl not implemented.");
 }
 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 9af296b1853a..b29665a63390 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -6703,7 +6703,7 @@ INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
  COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
  // Workaround a bug in glibc's "old" semaphore implementation by
  // zero-initializing the sem_t contents. This has to be done here because
-  // interceptors bind to the lowest symbols version by default, hitting the
+  // interceptors bind to the lowest version before glibc 2.36, hitting the
  // buggy code path while the non-sanitized build of the same code works fine.
  REAL(memset)(s, 0, sizeof(*s));
  int res = REAL(sem_init)(s, pshared, value);
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 589b5c3b2241..5f62b974170f 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -36,7 +36,7 @@
 
 #ifdef __cplusplus
 
-# define _LIBCPP_VERSION 15003
+# define _LIBCPP_VERSION 15006
 
 # define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y
 # define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y)
diff --git a/libcxx/include/__functional/function.h b/libcxx/include/__functional/function.h
index db3af6e24101..55b607f3f804 100644
--- a/libcxx/include/__functional/function.h
+++ b/libcxx/include/__functional/function.h
@@ -883,7 +883,7 @@ template <class _Rp, class... _ArgTypes> class __policy_func<_Rp(_ArgTypes...)>
 #endif // _LIBCPP_NO_RTTI
 };
 
-#if defined(_LIBCPP_HAS_BLOCKS_RUNTIME) && !defined(_LIBCPP_HAS_OBJC_ARC)
+#if defined(_LIBCPP_HAS_BLOCKS_RUNTIME)
 
 extern "C" void *_Block_copy(const void *);
 extern "C" void _Block_release(const void *);
@@ -898,14 +898,22 @@ class __func<_Rp1(^)(_ArgTypes1...), _Alloc, _Rp(_ArgTypes...)>
 public:
     _LIBCPP_INLINE_VISIBILITY
     explicit __func(__block_type const& __f)
+#ifdef _LIBCPP_HAS_OBJC_ARC
+        : __f_(__f)
+#else
        : __f_(reinterpret_cast<__block_type>(__f ? _Block_copy(__f) : nullptr))
+#endif
    { }
 
    // [TODO] add && to save on a retain
 
    _LIBCPP_INLINE_VISIBILITY
    explicit __func(__block_type __f, const _Alloc& /* unused */)
+#ifdef _LIBCPP_HAS_OBJC_ARC
+        : __f_(__f)
+#else
        : __f_(reinterpret_cast<__block_type>(__f ? _Block_copy(__f) : nullptr))
+#endif
    { }
 
    virtual __base<_Rp(_ArgTypes...)>* __clone() const {
@@ -921,8 +929,10 @@ public:
    }
 
    virtual void destroy() _NOEXCEPT {
+#ifndef _LIBCPP_HAS_OBJC_ARC
        if (__f_)
            _Block_release(__f_);
+#endif
        __f_ = 0;
    }
 
@@ -950,7 +960,7 @@ public:
 
 #endif // _LIBCPP_NO_RTTI
 };
 
-#endif // _LIBCPP_HAS_EXTENSION_BLOCKS && !_LIBCPP_HAS_OBJC_ARC
+#endif // _LIBCPP_HAS_EXTENSION_BLOCKS
 
 } // namespace __function
 
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 296fb4220012..58d863776430 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -1330,12 +1330,15 @@ static void readConfigs(opt::InputArgList &args) {
    parseClangOption(std::string("-") + arg->getValue(), arg->getSpelling());
 
  // GCC collect2 passes -plugin-opt=path/to/lto-wrapper with an absolute or
-  // relative path. Just ignore. If not ended with "lto-wrapper", consider it an
+  // relative path. Just ignore. If not ended with "lto-wrapper" (or
+  // "lto-wrapper.exe" for GCC cross-compiled for Windows), consider it an
  // unsupported LLVMgold.so option and error.
-  for (opt::Arg *arg : args.filtered(OPT_plugin_opt_eq))
-    if (!StringRef(arg->getValue()).endswith("lto-wrapper"))
+  for (opt::Arg *arg : args.filtered(OPT_plugin_opt_eq)) {
+    StringRef v(arg->getValue());
+    if (!v.endswith("lto-wrapper") && !v.endswith("lto-wrapper.exe"))
      error(arg->getSpelling() + ": unknown plugin option '" + arg->getValue() +
            "'");
+  }
 
  config->passPlugins = args::getStrings(args, OPT_load_pass_plugins);
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 927dc272b532..473809b05e9c 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -1157,7 +1157,7 @@ template <class ELFT> void ObjFile<ELFT>::postParse() {
      continue;
    }
 
-    if (binding == STB_WEAK)
+    if (sym.binding == STB_WEAK || binding == STB_WEAK)
      continue;
    std::lock_guard<std::mutex> lock(mu);
    ctx->duplicates.push_back({&sym, this, sec, eSym.st_value});
diff --git a/lld/MachO/UnwindInfoSection.cpp b/lld/MachO/UnwindInfoSection.cpp
index ca6cbdfbb8bb..8f267251b7c0 100644
--- a/lld/MachO/UnwindInfoSection.cpp
+++ b/lld/MachO/UnwindInfoSection.cpp
@@ -158,7 +158,7 @@ class UnwindInfoSectionImpl final : public UnwindInfoSection {
 public:
  UnwindInfoSectionImpl() : cuOffsets(target->wordSize) {}
  uint64_t getSize() const override { return unwindInfoSize; }
-  void prepareRelocations() override;
+  void prepare() override;
  void finalize() override;
  void writeTo(uint8_t *buf) const override;
 
@@ -166,6 +166,7 @@ private:
  void prepareRelocations(ConcatInputSection *);
  void relocateCompactUnwind(std::vector<CompactUnwindEntry> &);
  void encodePersonalities();
+  Symbol *canonicalizePersonality(Symbol *);
 
  uint64_t unwindInfoSize = 0;
  std::vector<decltype(symbols)::value_type> symbolsVec;
@@ -218,14 +219,24 @@ void UnwindInfoSection::addSymbol(const Defined *d) {
  }
 }
 
-void UnwindInfoSectionImpl::prepareRelocations() {
+void UnwindInfoSectionImpl::prepare() {
  // This iteration needs to be deterministic, since prepareRelocations may add
  // entries to the GOT. Hence the use of a MapVector for
  // UnwindInfoSection::symbols.
  for (const Defined *d : make_second_range(symbols))
-    if (d->unwindEntry &&
-        d->unwindEntry->getName() == section_names::compactUnwind)
-      prepareRelocations(d->unwindEntry);
+    if (d->unwindEntry) {
+      if (d->unwindEntry->getName() == section_names::compactUnwind) {
+        prepareRelocations(d->unwindEntry);
+      } else {
+        // We don't have to add entries to the GOT here because FDEs have
+        // explicit GOT relocations, so Writer::scanRelocations() will add those
+        // GOT entries. However, we still need to canonicalize the personality
+        // pointers (like prepareRelocations() does for CU entries) in order
+        // to avoid overflowing the 3-personality limit.
+        FDE &fde = cast<ObjFile>(d->getFile())->fdes[d->unwindEntry];
+        fde.personality = canonicalizePersonality(fde.personality);
+      }
+    }
 }
 
 // Compact unwind relocations have different semantics, so we handle them in a
@@ -279,6 +290,7 @@ void UnwindInfoSectionImpl::prepareRelocations(ConcatInputSection *isec) {
      continue;
    }
 
+    // Similar to canonicalizePersonality(), but we also register a GOT entry.
    if (auto *defined = dyn_cast<Defined>(s)) {
      // Check if we have created a synthetic symbol at the same address.
      Symbol *&personality =
@@ -291,6 +303,7 @@ void UnwindInfoSectionImpl::prepareRelocations(ConcatInputSection *isec) {
      }
      continue;
    }
+    assert(isa<DylibSymbol>(s));
 
    in.got->addEntry(s);
    continue;
@@ -320,6 +333,18 @@ void UnwindInfoSectionImpl::prepareRelocations(ConcatInputSection *isec) {
  }
 }
 
+Symbol *UnwindInfoSectionImpl::canonicalizePersonality(Symbol *personality) {
+  if (auto *defined = dyn_cast_or_null<Defined>(personality)) {
+    // Check if we have created a synthetic symbol at the same address.
+    Symbol *&synth = personalityTable[{defined->isec, defined->value}];
+    if (synth == nullptr)
+      synth = defined;
+    else if (synth != defined)
+      return synth;
+  }
+  return personality;
+}
+
 // We need to apply the relocations to the pre-link compact unwind section
 // before converting it to post-link form. There should only be absolute
 // relocations here: since we are not emitting the pre-link CU section, there
diff --git a/lld/MachO/UnwindInfoSection.h b/lld/MachO/UnwindInfoSection.h
index c6b334731c75..f2bc3213a127 100644
--- a/lld/MachO/UnwindInfoSection.h
+++ b/lld/MachO/UnwindInfoSection.h
@@ -24,7 +24,7 @@ public:
  // section entirely.
  bool isNeeded() const override { return !allEntriesAreOmitted; }
  void addSymbol(const Defined *);
-  virtual void prepareRelocations() = 0;
+  virtual void prepare() = 0;
 
 protected:
  UnwindInfoSection();
diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp
index 3c44a60f4be2..ce9672dd0b4f 100644
--- a/lld/MachO/Writer.cpp
+++ b/lld/MachO/Writer.cpp
@@ -675,7 +675,7 @@ void Writer::scanRelocations() {
    }
  }
 
-  in.unwindInfo->prepareRelocations();
+  in.unwindInfo->prepare();
 }
 
 void Writer::scanSymbols() {
diff --git a/lldb/bindings/interfaces.swig b/lldb/bindings/interfaces.swig
index c9a6d0f06056..021c7683d170 100644
--- a/lldb/bindings/interfaces.swig
+++ b/lldb/bindings/interfaces.swig
@@ -1,8 +1,5 @@
 /* Various liblldb typedefs that SWIG needs to know about. */
 #define __extension__ /* Undefine GCC keyword to make Swig happy when processing glibc's stdint.h. */
-/* The ISO C99 standard specifies that in C++ implementations limit macros such
-   as INT32_MAX should only be defined if __STDC_LIMIT_MACROS is. */
-#define __STDC_LIMIT_MACROS
 %include "stdint.i"
 
 %include "lldb/lldb-defines.h"
diff --git a/lldb/bindings/python/python-typemaps.swig b/lldb/bindings/python/python-typemaps.swig
index bf3de66b91bf..d45431c771ca 100644
--- a/lldb/bindings/python/python-typemaps.swig
+++ b/lldb/bindings/python/python-typemaps.swig
@@ -435,7 +435,7 @@ template <> bool SetNumberFromPyObject<double>(double &number, PyObject *obj) {
 
 %typemap(out) lldb::FileSP {
  $result = nullptr;
-  lldb::FileSP &sp = $1;
+  const lldb::FileSP &sp = $1;
  if (sp) {
    PythonFile pyfile = unwrapOrSetPythonException(PythonFile::FromFile(*sp));
    if (!pyfile.IsValid())
diff --git a/lldb/include/lldb/API/SBType.h b/lldb/include/lldb/API/SBType.h
index 244d328b51f4..aa45aeeec476 100644
--- a/lldb/include/lldb/API/SBType.h
+++ b/lldb/include/lldb/API/SBType.h
@@ -182,6 +182,8 @@ public:
 
  lldb::SBType GetTemplateArgumentType(uint32_t idx);
 
+  /// Return the TemplateArgumentKind of the template argument at index idx.
+  /// Variadic argument packs are automatically expanded.
  lldb::TemplateArgumentKind GetTemplateArgumentKind(uint32_t idx);
 
  lldb::SBType GetFunctionReturnType();
diff --git a/lldb/include/lldb/Symbol/CompilerType.h b/lldb/include/lldb/Symbol/CompilerType.h
index 0ad05a27570e..aefd19d0a859 100644
--- a/lldb/include/lldb/Symbol/CompilerType.h
+++ b/lldb/include/lldb/Symbol/CompilerType.h
@@ -338,14 +338,28 @@ public:
  GetIndexOfChildMemberWithName(const char *name, bool omit_empty_base_classes,
                                std::vector<uint32_t> &child_indexes) const;
 
-  size_t GetNumTemplateArguments() const;
-
-  lldb::TemplateArgumentKind GetTemplateArgumentKind(size_t idx) const;
-  CompilerType GetTypeTemplateArgument(size_t idx) const;
+  /// Return the number of template arguments the type has.
+  /// If expand_pack is true, then variadic argument packs are automatically
+  /// expanded to their supplied arguments. If it is false an argument pack
+  /// will only count as 1 argument.
+  size_t GetNumTemplateArguments(bool expand_pack = false) const;
+
+  // Return the TemplateArgumentKind of the template argument at index idx.
+  // If expand_pack is true, then variadic argument packs are automatically
+  // expanded to their supplied arguments. With expand_pack set to false, an
+  // arguement pack will count as 1 argument and return a type of Pack.
+  lldb::TemplateArgumentKind
+  GetTemplateArgumentKind(size_t idx, bool expand_pack = false) const;
+  CompilerType GetTypeTemplateArgument(size_t idx,
+                                       bool expand_pack = false) const;
 
  /// Returns the value of the template argument and its type.
+  /// If expand_pack is true, then variadic argument packs are automatically
+  /// expanded to their supplied arguments. With expand_pack set to false, an
+  /// arguement pack will count as 1 argument and it is invalid to call this
+  /// method on the pack argument.
  llvm::Optional<IntegralTemplateArgument>
-  GetIntegralTemplateArgument(size_t idx) const;
+  GetIntegralTemplateArgument(size_t idx, bool expand_pack = false) const;
 
  CompilerType GetTypeForFormatters() const;
diff --git a/lldb/include/lldb/Symbol/TypeSystem.h b/lldb/include/lldb/Symbol/TypeSystem.h
index be5783596897..769449a4933b 100644
--- a/lldb/include/lldb/Symbol/TypeSystem.h
+++ b/lldb/include/lldb/Symbol/TypeSystem.h
@@ -346,14 +346,18 @@ public:
      const char *name, bool omit_empty_base_classes,
      std::vector<uint32_t> &child_indexes) = 0;
 
-  virtual size_t GetNumTemplateArguments(lldb::opaque_compiler_type_t type);
+  virtual size_t GetNumTemplateArguments(lldb::opaque_compiler_type_t type,
+                                         bool expand_pack);
 
  virtual lldb::TemplateArgumentKind
-  GetTemplateArgumentKind(lldb::opaque_compiler_type_t type, size_t idx);
-  virtual CompilerType GetTypeTemplateArgument(lldb::opaque_compiler_type_t type,
-                                               size_t idx);
+  GetTemplateArgumentKind(lldb::opaque_compiler_type_t type, size_t idx,
+                          bool expand_pack);
+  virtual CompilerType
+  GetTypeTemplateArgument(lldb::opaque_compiler_type_t type, size_t idx,
+                          bool expand_pack);
  virtual llvm::Optional<CompilerType::IntegralTemplateArgument>
-  GetIntegralTemplateArgument(lldb::opaque_compiler_type_t type, size_t idx);
+  GetIntegralTemplateArgument(lldb::opaque_compiler_type_t type, size_t idx,
+                              bool expand_pack);
 
  // Dumping types
diff --git a/lldb/source/API/SBType.cpp b/lldb/source/API/SBType.cpp
index 533930c0544b..adc60a084367 100644
--- a/lldb/source/API/SBType.cpp
+++ b/lldb/source/API/SBType.cpp
@@ -542,7 +542,8 @@ uint32_t SBType::GetNumberOfTemplateArguments() {
  LLDB_INSTRUMENT_VA(this);
 
  if (IsValid())
-    return m_opaque_sp->GetCompilerType(false).GetNumTemplateArguments();
+    return m_opaque_sp->GetCompilerType(false).GetNumTemplateArguments(
+        /*expand_pack=*/true);
  return 0;
 }
 
@@ -553,13 +554,15 @@ lldb::SBType SBType::GetTemplateArgumentType(uint32_t idx) {
    return SBType();
 
  CompilerType type;
+  const bool expand_pack = true;
  switch(GetTemplateArgumentKind(idx)) {
    case eTemplateArgumentKindType:
-      type = m_opaque_sp->GetCompilerType(false).GetTypeTemplateArgument(idx);
+      type = m_opaque_sp->GetCompilerType(false).GetTypeTemplateArgument(
+          idx, expand_pack);
      break;
    case eTemplateArgumentKindIntegral:
      type = m_opaque_sp->GetCompilerType(false)
-                 .GetIntegralTemplateArgument(idx)
+                 .GetIntegralTemplateArgument(idx, expand_pack)
                 ->type;
      break;
    default:
@@ -574,7 +577,8 @@ lldb::TemplateArgumentKind SBType::GetTemplateArgumentKind(uint32_t idx) {
  LLDB_INSTRUMENT_VA(this, idx);
 
  if (IsValid())
-    return m_opaque_sp->GetCompilerType(false).GetTemplateArgumentKind(idx);
+    return m_opaque_sp->GetCompilerType(false).GetTemplateArgumentKind(
+        idx, /*expand_pack=*/true);
  return eTemplateArgumentKindNull;
 }
 
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
index c6eb693bba6b..a1ebe5830bb9 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.cpp
@@ -7096,7 +7096,8 @@ TypeSystemClang::GetIndexOfChildWithName(lldb::opaque_compiler_type_t type,
 }
 
 size_t
-TypeSystemClang::GetNumTemplateArguments(lldb::opaque_compiler_type_t type) {
+TypeSystemClang::GetNumTemplateArguments(lldb::opaque_compiler_type_t type,
+                                         bool expand_pack) {
  if (!type)
    return 0;
 
@@ -7111,8 +7112,17 @@ TypeSystemClang::GetNumTemplateArguments(lldb::opaque_compiler_type_t type) {
        const clang::ClassTemplateSpecializationDecl *template_decl =
            llvm::dyn_cast<clang::ClassTemplateSpecializationDecl>(
                cxx_record_decl);
-        if (template_decl)
-          return template_decl->getTemplateArgs().size();
+        if (template_decl) {
+          const auto &template_arg_list = template_decl->getTemplateArgs();
+          size_t num_args = template_arg_list.size();
+          assert(num_args && "template specialization without any args");
+          if (expand_pack && num_args) {
+            const auto &pack = template_arg_list[num_args - 1];
+            if (pack.getKind() == clang::TemplateArgument::Pack)
+              num_args += pack.pack_size() - 1;
+          }
+          return num_args;
+        }
      }
    }
    break;
@@ -7149,15 +7159,51 @@ TypeSystemClang::GetAsTemplateSpecialization(
  }
 }
 
+const TemplateArgument *
+GetNthTemplateArgument(const clang::ClassTemplateSpecializationDecl *decl,
+                       size_t idx, bool expand_pack) {
+  const auto &args = decl->getTemplateArgs();
+  const size_t args_size = args.size();
+
+  assert(args_size && "template specialization without any args");
+  if (!args_size)
+    return nullptr;
+
+  const size_t last_idx = args_size - 1;
+
+  // We're asked for a template argument that can't be a parameter pack, so
+  // return it without worrying about 'expand_pack'.
+  if (idx < last_idx)
+    return &args[idx];
+
+  // We're asked for the last template argument but we don't want/need to
+  // expand it.
+  if (!expand_pack || args[last_idx].getKind() != clang::TemplateArgument::Pack)
+    return idx >= args.size() ? nullptr : &args[idx];
+
+  // Index into the expanded pack.
+  // Note that 'idx' counts from the beginning of all template arguments
+  // (including the ones preceding the parameter pack).
+  const auto &pack = args[last_idx];
+  const size_t pack_idx = idx - last_idx;
+  const size_t pack_size = pack.pack_size();
+  assert(pack_idx < pack_size && "parameter pack index out-of-bounds");
+  return &pack.pack_elements()[pack_idx];
+}
+
 lldb::TemplateArgumentKind
 TypeSystemClang::GetTemplateArgumentKind(lldb::opaque_compiler_type_t type,
-                                         size_t arg_idx) {
+                                         size_t arg_idx, bool expand_pack) {
  const clang::ClassTemplateSpecializationDecl *template_decl =
      GetAsTemplateSpecialization(type);
-  if (!template_decl || arg_idx >= template_decl->getTemplateArgs().size())
+  if (!template_decl)
+    return eTemplateArgumentKindNull;
+
+  const auto *arg = GetNthTemplateArgument(template_decl, arg_idx, expand_pack);
+  if (!arg)
    return eTemplateArgumentKindNull;
 
-  switch (template_decl->getTemplateArgs()[arg_idx].getKind()) {
+  switch (arg->getKind()) {
  case clang::TemplateArgument::Null:
    return eTemplateArgumentKindNull;
 
@@ -7190,35 +7236,32 @@ TypeSystemClang::GetTemplateArgumentKind(lldb::opaque_compiler_type_t type,
 
 CompilerType
 TypeSystemClang::GetTypeTemplateArgument(lldb::opaque_compiler_type_t type,
-                                         size_t idx) {
+                                         size_t idx, bool expand_pack) {
  const clang::ClassTemplateSpecializationDecl *template_decl =
      GetAsTemplateSpecialization(type);
-  if (!template_decl || idx >= template_decl->getTemplateArgs().size())
+  if (!template_decl)
    return CompilerType();
 
-  const clang::TemplateArgument &template_arg =
-      template_decl->getTemplateArgs()[idx];
-  if (template_arg.getKind() != clang::TemplateArgument::Type)
+  const auto *arg = GetNthTemplateArgument(template_decl, idx, expand_pack);
+  if (!arg || arg->getKind() != clang::TemplateArgument::Type)
    return CompilerType();
 
-  return GetType(template_arg.getAsType());
+  return GetType(arg->getAsType());
 }
 
 Optional<CompilerType::IntegralTemplateArgument>
 TypeSystemClang::GetIntegralTemplateArgument(lldb::opaque_compiler_type_t type,
-                                             size_t idx) {
+                                             size_t idx, bool expand_pack) {
  const clang::ClassTemplateSpecializationDecl *template_decl =
      GetAsTemplateSpecialization(type);
-  if (! template_decl || idx >= template_decl->getTemplateArgs().size())
+  if (!template_decl)
    return llvm::None;
 
-  const clang::TemplateArgument &template_arg =
-      template_decl->getTemplateArgs()[idx];
-  if (template_arg.getKind() != clang::TemplateArgument::Integral)
+  const auto *arg = GetNthTemplateArgument(template_decl, idx, expand_pack);
+  if (!arg || arg->getKind() != clang::TemplateArgument::Integral)
    return llvm::None;
 
-  return {
-      {template_arg.getAsIntegral(), GetType(template_arg.getIntegralType())}};
+  return {{arg->getAsIntegral(), GetType(arg->getIntegralType())}};
 }
 
 CompilerType TypeSystemClang::GetTypeForFormatters(void *type) {
diff --git a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
index 24dbb71c8f4d..7f25a6df548f 100644
--- a/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
+++ b/lldb/source/Plugins/TypeSystem/Clang/TypeSystemClang.h
@@ -91,7 +91,7 @@ public:
  void SetOwningModule(OptionalClangModuleID id);
  /// \}
 };
-  
+
 /// A TypeSystem implementation based on Clang.
 ///
 /// This class uses a single clang::ASTContext as the backend for storing
@@ -334,7 +334,7 @@ public:
    llvm::SmallVector<const char *, 2> names;
    llvm::SmallVector<clang::TemplateArgument, 2> args;
-    
+
    const char * pack_name = nullptr;
    std::unique_ptr<TemplateParameterInfos> packed_args;
  };
@@ -537,7 +537,7 @@ public:
 #ifndef NDEBUG
  bool Verify(lldb::opaque_compiler_type_t type) override;
 #endif
-  
+
  bool IsArrayType(lldb::opaque_compiler_type_t type, CompilerType *element_type,
                   uint64_t *size, bool *is_incomplete) override;
@@ -810,16 +810,17 @@ public:
      const char *name, bool omit_empty_base_classes,
      std::vector<uint32_t> &child_indexes) override;
 
-  size_t GetNumTemplateArguments(lldb::opaque_compiler_type_t type) override;
+  size_t GetNumTemplateArguments(lldb::opaque_compiler_type_t type,
+                                 bool expand_pack) override;
 
  lldb::TemplateArgumentKind
-  GetTemplateArgumentKind(lldb::opaque_compiler_type_t type,
-                          size_t idx) override;
+  GetTemplateArgumentKind(lldb::opaque_compiler_type_t type, size_t idx,
+                          bool expand_pack) override;
  CompilerType GetTypeTemplateArgument(lldb::opaque_compiler_type_t type,
-                                       size_t idx) override;
+                                       size_t idx, bool expand_pack) override;
  llvm::Optional<CompilerType::IntegralTemplateArgument>
-  GetIntegralTemplateArgument(lldb::opaque_compiler_type_t type,
-                              size_t idx) override;
+  GetIntegralTemplateArgument(lldb::opaque_compiler_type_t type, size_t idx,
+                              bool expand_pack) override;
 
  CompilerType GetTypeForFormatters(void *type) override;
diff --git a/lldb/source/Symbol/CompilerType.cpp b/lldb/source/Symbol/CompilerType.cpp
index ac98352c235e..bef456583687 100644
--- a/lldb/source/Symbol/CompilerType.cpp
+++ b/lldb/source/Symbol/CompilerType.cpp
@@ -659,30 +659,32 @@ size_t CompilerType::GetIndexOfChildMemberWithName(
  return 0;
 }
 
-size_t CompilerType::GetNumTemplateArguments() const {
+size_t CompilerType::GetNumTemplateArguments(bool expand_pack) const {
  if (IsValid()) {
-    return m_type_system->GetNumTemplateArguments(m_type);
+    return m_type_system->GetNumTemplateArguments(m_type, expand_pack);
  }
  return 0;
 }
 
-TemplateArgumentKind CompilerType::GetTemplateArgumentKind(size_t idx) const {
+TemplateArgumentKind
+CompilerType::GetTemplateArgumentKind(size_t idx, bool expand_pack) const {
  if (IsValid())
-    return m_type_system->GetTemplateArgumentKind(m_type, idx);
+    return m_type_system->GetTemplateArgumentKind(m_type, idx, expand_pack);
  return eTemplateArgumentKindNull;
 }
 
-CompilerType CompilerType::GetTypeTemplateArgument(size_t idx) const {
+CompilerType CompilerType::GetTypeTemplateArgument(size_t idx,
+                                                   bool expand_pack) const {
  if (IsValid()) {
-    return m_type_system->GetTypeTemplateArgument(m_type, idx);
+    return m_type_system->GetTypeTemplateArgument(m_type, idx, expand_pack);
  }
  return CompilerType();
 }
 
 llvm::Optional<CompilerType::IntegralTemplateArgument>
-CompilerType::GetIntegralTemplateArgument(size_t idx) const {
+CompilerType::GetIntegralTemplateArgument(size_t idx, bool expand_pack) const {
  if (IsValid())
-    return m_type_system->GetIntegralTemplateArgument(m_type, idx);
+    return m_type_system->GetIntegralTemplateArgument(m_type, idx, expand_pack);
  return llvm::None;
 }
 
diff --git a/lldb/source/Symbol/TypeSystem.cpp b/lldb/source/Symbol/TypeSystem.cpp
index 3092dc0bf0a4..412373533aab 100644
--- a/lldb/source/Symbol/TypeSystem.cpp
+++ b/lldb/source/Symbol/TypeSystem.cpp
@@ -118,23 +118,25 @@ CompilerType TypeSystem::GetTypeForFormatters(void *type) {
  return CompilerType(this, type);
 }
 
-size_t TypeSystem::GetNumTemplateArguments(lldb::opaque_compiler_type_t type) {
+size_t TypeSystem::GetNumTemplateArguments(lldb::opaque_compiler_type_t type,
+                                           bool expand_pack) {
  return 0;
 }
 
 TemplateArgumentKind
-TypeSystem::GetTemplateArgumentKind(opaque_compiler_type_t type, size_t idx) {
+TypeSystem::GetTemplateArgumentKind(opaque_compiler_type_t type, size_t idx,
+                                    bool expand_pack) {
  return eTemplateArgumentKindNull;
 }
 
 CompilerType TypeSystem::GetTypeTemplateArgument(opaque_compiler_type_t type,
-                                                 size_t idx) {
+                                                 size_t idx, bool expand_pack) {
  return CompilerType();
 }
 
 llvm::Optional<CompilerType::IntegralTemplateArgument>
-TypeSystem::GetIntegralTemplateArgument(opaque_compiler_type_t type,
-                                        size_t idx) {
+TypeSystem::GetIntegralTemplateArgument(opaque_compiler_type_t type, size_t idx,
+                                        bool expand_pack) {
  return llvm::None;
 }
 
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index c4795a80ead2..bc20f33f174c 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -1110,6 +1110,12 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
      continue;
 
    Type *ElementTy = getLoadStoreType(&I);
+    // Currently, codegen doesn't support cases where the type size doesn't
+    // match the alloc size. Skip them for now.
+    uint64_t Size = DL.getTypeAllocSize(ElementTy);
+    if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
+      continue;
+
    // We don't check wrapping here because we don't know yet if Ptr will be
    // part of a full group or a group with gaps. Checking wrapping for all
    // pointers (even those that end up in groups with no gaps) will be overly
@@ -1121,7 +1127,6 @@ void InterleavedAccessInfo::collectConstStrideAccesses(
                                              /*Assume=*/true, /*ShouldCheckWrap=*/false);
 
    const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
-    uint64_t Size = DL.getTypeAllocSize(ElementTy);
 
    AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                            getLoadStoreAlignment(&I));
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 35650b9bd00e..ecdaef0442da 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -9693,6 +9693,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
    Entry.Alignment = Alignment;
    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
    CLI.NumFixedArgs += 1;
+    CLI.getArgs()[0].IndirectType = CLI.RetTy;
    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
 
    // sret demotion isn't compatible with tail-calls, since the sret argument
diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
index 8dc8d381ad16..a63118067139 100644
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -569,7 +569,8 @@ void IRPromoter::TruncateSinks() {
 void IRPromoter::Cleanup() {
  LLVM_DEBUG(dbgs() << "IR Promotion: Cleanup..\n");
  // Some zexts will now have become redundant, along with their trunc
-  // operands, so remove them
+  // operands, so remove them.
+  // Some zexts need to be replaced with truncate if src bitwidth is larger.
  for (auto *V : Visited) {
    if (!isa<ZExtInst>(V))
      continue;
@@ -584,6 +585,11 @@ void IRPromoter::Cleanup() {
                        << "\n");
      ReplaceAllUsersOfWith(ZExt, Src);
      continue;
+    } else if (ZExt->getSrcTy()->getScalarSizeInBits() > PromotedWidth) {
+      IRBuilder<> Builder{ZExt};
+      Value *Trunc = Builder.CreateTrunc(Src, ZExt->getDestTy());
+      ReplaceAllUsersOfWith(ZExt, Trunc);
+      continue;
    }
 
    // We've inserted a trunc for a zext sink, but we already know that the
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 75594f90c926..b9962da1d302 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1040,7 +1040,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
                                        Name, F->getParent());
 
      // The new function may also need remangling.
-      if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F))
+      if (auto Result = llvm::Intrinsic::remangleIntrinsicFunction(NewFn))
        NewFn = *Result;
      return true;
    }
diff --git a/llvm/lib/Support/Unix/Signals.inc b/llvm/lib/Support/Unix/Signals.inc
index bf145bffe8bf..23ac012b9e00 100644
--- a/llvm/lib/Support/Unix/Signals.inc
+++ b/llvm/lib/Support/Unix/Signals.inc
@@ -432,10 +432,6 @@ void llvm::sys::SetOneShotPipeSignalFunction(void (*Handler)()) {
 }
 
 void llvm::sys::DefaultOneShotPipeSignalHandler() {
-  // UNIX03 conformance requires a non-zero exit code and an error message
-  // to stderr when writing to a closed stdout fails.
-  errs() << "error: write on a pipe with no reader\n";
-
  // Send a special return code that drivers can check for, from sysexits.h.
  exit(EX_IOERR);
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c28216048d7c..06e21f90ebf1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -5805,7 +5805,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
    assert(!Res && "Call operand has unhandled type");
    (void)Res;
  }
-  SmallVector<SDValue, 16> ArgValues;
+
  unsigned ExtraArgLocs = 0;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i - ExtraArgLocs];
@@ -6157,17 +6157,10 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
 /// appropriate copies out of appropriate physical registers.
 SDValue AArch64TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
-    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+    const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
-  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
-  // Assign locations to each value returned by this call.
-  SmallVector<CCValAssign, 16> RVLocs;
  DenseMap<unsigned, SDValue> CopiedRegs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
-                 *DAG.getContext());
-  CCInfo.AnalyzeCallResult(Ins, RetCC);
-
  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];
@@ -6508,17 +6501,39 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
    GuardWithBTI = FuncInfo->branchTargetEnforcement();
  }
 
+  // Analyze operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+  if (IsVarArg) {
+    unsigned NumArgs = Outs.size();
+
+    for (unsigned i = 0; i != NumArgs; ++i) {
+      if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
+        report_fatal_error("Passing SVE types to variadic functions is "
+                           "currently not supported");
+    }
+  }
+
+  analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
+
+  CCAssignFn *RetCC = CCAssignFnForReturn(CallConv);
+  // Assign locations to each value returned by this call.
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                    *DAG.getContext());
+  RetCCInfo.AnalyzeCallResult(Ins, RetCC);
+
  // Check callee args/returns for SVE registers and set calling convention
  // accordingly.
  if (CallConv == CallingConv::C || CallConv == CallingConv::Fast) {
-    bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
-      return Out.VT.isScalableVector();
-    });
-    bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
-      return In.VT.isScalableVector();
-    });
-
-    if (CalleeInSVE || CalleeOutSVE)
+    auto HasSVERegLoc = [](CCValAssign &Loc) {
+      if (!Loc.isRegLoc())
+        return false;
+      return AArch64::ZPRRegClass.contains(Loc.getLocReg()) ||
+             AArch64::PPRRegClass.contains(Loc.getLocReg());
+    };
+    if (any_of(RVLocs, HasSVERegLoc) || any_of(ArgLocs, HasSVERegLoc))
      CallConv = CallingConv::AArch64_SVE_VectorCall;
  }
 
@@ -6540,22 +6555,6 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");
 
-  // Analyze operands of the call, assigning locations to each operand.
-  SmallVector<CCValAssign, 16> ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
-
-  if (IsVarArg) {
-    unsigned NumArgs = Outs.size();
-
-    for (unsigned i = 0; i != NumArgs; ++i) {
-      if (!Outs[i].IsFixed && Outs[i].VT.isScalableVector())
-        report_fatal_error("Passing SVE types to variadic functions is "
-                           "currently not supported");
-    }
-  }
-
-  analyzeCallOperands(*this, Subtarget, CLI, CCInfo);
-
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -6961,7 +6960,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
 
  // Handle result values, copying them out of physregs into vregs that we
  // return.
-  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, RVLocs, DL, DAG,
                         InVals, IsThisReturn,
                         IsThisReturn ? OutVals[0] : SDValue());
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 1ba2e2f315ec..ff3bfe897869 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -894,7 +894,7 @@ private:
  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
-                          const SmallVectorImpl<ISD::InputArg> &Ins,
+                          const SmallVectorImpl<CCValAssign> &RVLocs,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index f7d139adc63b..f6b7d1ffc6d2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -249,6 +249,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
+      case ISD::UNDEF:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::EXTRACT_SUBVECTOR:
@@ -516,6 +517,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
+      case ISD::UNDEF:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 3c102463ba08..cbfd2bc68f18 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1036,7 +1036,7 @@ InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
    // split, we may need an expensive shuffle to get two in sync. This has the
    // effect of making larger than legal compares (v8i32 for example)
    // expensive.
-    if (LT.second.getVectorNumElements() > 2) {
+    if (LT.second.isVector() && LT.second.getVectorNumElements() > 2) {
      if (LT.first > 1)
        return LT.first * BaseCost +
               BaseT::getScalarizationOverhead(VecCondTy, true, false);
diff --git a/llvm/lib/Target/Sparc/SparcCallingConv.td b/llvm/lib/Target/Sparc/SparcCallingConv.td
index e6d23f741ea5..8afd0a7fc09a 100644
--- a/llvm/lib/Target/Sparc/SparcCallingConv.td
+++ b/llvm/lib/Target/Sparc/SparcCallingConv.td
@@ -125,10 +125,14 @@ def CC_Sparc64 : CallingConv<[
 def RetCC_Sparc64 : CallingConv<[
  // A single f32 return value always goes in %f0. The ABI doesn't specify what
  // happens to multiple f32 return values outside a struct.
-  CCIfType<[f32], CCCustom<"CC_Sparc64_Half">>,
+  CCIfType<[f32], CCCustom<"RetCC_Sparc64_Half">>,
 
-  // Otherwise, return values are passed exactly like arguments.
-  CCDelegateTo<CC_Sparc64>
+  // Otherwise, return values are passed exactly like arguments, except that
+  // returns that are too big to fit into the registers is passed as an sret
+  // instead.
+  CCIfInReg<CCIfType<[i32, f32], CCCustom<"RetCC_Sparc64_Half">>>,
+  CCIfType<[i32], CCPromoteToType<i64>>,
+  CCCustom<"RetCC_Sparc64_Full">
 ]>;
 
 // Callee-saved registers are handled by the register window mechanism.
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 2cb74e7709c7..f55675089102 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -101,9 +101,9 @@ static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
 }
 
 // Allocate a full-sized argument for the 64-bit ABI.
-static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
-                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
-                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool Analyze_CC_Sparc64_Full(bool IsReturn, unsigned &ValNo, MVT &ValVT,
+                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");
@@ -133,6 +133,11 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
    return true;
  }
 
+  // Bail out if this is a return CC and we run out of registers to place
+  // values into.
+  if (IsReturn)
+    return false;
+
  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
@@ -146,9 +151,9 @@ static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
 
 // Allocate a half-sized argument for the 64-bit ABI.
 //
 // This is used when passing { float, int } structs by value in registers.
-static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
-                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
-                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+static bool Analyze_CC_Sparc64_Half(bool IsReturn, unsigned &ValNo, MVT &ValVT,
+                                    MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, Align(4));
@@ -174,10 +179,43 @@ static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
    return true;
  }
 
+  // Bail out if this is a return CC and we run out of registers to place
+  // values into.
+  if (IsReturn)
+    return false;
+
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
 }
 
+static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                            CCValAssign::LocInfo &LocInfo,
+                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  return Analyze_CC_Sparc64_Full(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+                                 State);
+}
+
+static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                            CCValAssign::LocInfo &LocInfo,
+                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  return Analyze_CC_Sparc64_Half(false, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+                                 State);
+}
+
+static bool RetCC_Sparc64_Full(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                               CCValAssign::LocInfo &LocInfo,
+                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  return Analyze_CC_Sparc64_Full(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+                                 State);
+}
+
+static bool RetCC_Sparc64_Half(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                               CCValAssign::LocInfo &LocInfo,
+                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  return Analyze_CC_Sparc64_Half(true, ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+                                 State);
+}
+
 #include "SparcGenCallingConv.inc"
 
 // The calling conventions in SparcCallingConv.td are described in terms of the
@@ -191,6 +229,15 @@ static unsigned toCallerWindow(unsigned Reg) {
  return Reg;
 }
 
+bool SparcTargetLowering::CanLowerReturn(
+    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
+  return CCInfo.CheckReturn(Outs, Subtarget->is64Bit() ? RetCC_Sparc64
+                                                       : RetCC_Sparc32);
+}
+
 SDValue
 SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
@@ -1031,6 +1078,7 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
 
  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
+    assert(RVLocs[i].isRegLoc() && "Can only return in registers!");
    if (RVLocs[i].getLocVT() == MVT::v2i32) {
      SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
      SDValue Lo = DAG.getCopyFromReg(
@@ -1346,6 +1394,7 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
+    assert(VA.isRegLoc() && "Can only return in registers!");
    unsigned Reg = toCallerWindow(VA.getLocReg());
 
    // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 2768bb20566a..16e4f2687054 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -144,6 +144,11 @@ namespace llvm {
    SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
                         SmallVectorImpl<SDValue> &InVals) const;
 
+    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+                        bool isVarArg,
+                        const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        LLVMContext &Context) const override;
+
    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 6df0409256bb..6fc7b29c5b78 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -2003,7 +2003,7 @@ OptimizeFunctions(Module &M,
    // FIXME: We should also hoist alloca affected by this to the entry
    // block if possible.
    if (F.getAttributes().hasAttrSomewhere(Attribute::InAlloca) &&
-        !F.hasAddressTaken() && !hasMustTailCallers(&F)) {
+        !F.hasAddressTaken() && !hasMustTailCallers(&F) && !F.isVarArg()) {
      RemoveAttribute(&F, Attribute::InAlloca);
      Changed = true;
    }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bc01d2ef7fe2..52596b30494f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3289,6 +3289,10 @@ bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
    if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
      return false;
 
+    if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
+        Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
+      return false; // Cannot transform to or from byval.
+
    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 143a035749c7..644c5c82e58e 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1210,8 +1210,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
  BasicBlock *BB = PN.getParent();
  Align MaxAlign;
  uint64_t APWidth = DL.getIndexTypeSizeInBits(PN.getType());
-  APInt MaxSize(APWidth, 0);
-  bool HaveLoad = false;
+  Type *LoadType = nullptr;
  for (User *U : PN.users()) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI || !LI->isSimple())
@@ -1223,21 +1222,27 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
    if (LI->getParent() != BB)
      return false;
 
+    if (LoadType) {
+      if (LoadType != LI->getType())
+        return false;
+    } else {
+      LoadType = LI->getType();
+    }
+
    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;
 
-    uint64_t Size = DL.getTypeStoreSize(LI->getType()).getFixedSize();
    MaxAlign = std::max(MaxAlign, LI->getAlign());
-    MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
-    HaveLoad = true;
  }
 
-  if (!HaveLoad)
+  if (!LoadType)
    return false;
 
+  APInt LoadSize = APInt(APWidth, DL.getTypeStoreSize(LoadType).getFixedSize());
+
  // We can only transform this if it is safe to push the loads into the
  // predecessor blocks. The only thing to watch out for is that we can't put
  // a possibly trapping load in the predecessor if it is a critical edge.
@@ -1259,7 +1264,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
-    if (isSafeToLoadUnconditionally(InVal, MaxAlign, MaxSize, DL, TI))
+    if (isSafeToLoadUnconditionally(InVal, MaxAlign, LoadSize, DL, TI))
      continue;
 
    return false;
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
index 42be67f3cfc0..264da2187754 100644
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -356,9 +356,9 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
 
  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset..
-  if (MI->getIntrinsicID() == Intrinsic::memset) {
+  if (const auto *memset_inst = dyn_cast<MemSetInst>(MI)) {
    if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
-      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
+      auto *CI = dyn_cast<ConstantInt>(memset_inst->getValue());
      if (!CI || !CI->isZero())
        return -1;
    }
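
Aside on the LLDB changes above: the new GetNthTemplateArgument() helper maps a flat argument index onto a trailing parameter pack. The snippet below is a minimal, self-contained sketch of that index arithmetic only; it is not part of the imported patch, and it models the argument list with a plain std::vector instead of clang's TemplateArgumentList (ArgModel, num_arguments, and nth_argument are illustrative names, not LLDB API).

```cpp
// Standalone model (C++17) of the pack-expansion indexing used by the new
// GetNthTemplateArgument() helper. Assumption: a pack, if present, is always
// the last template argument, which is what the patch asserts.
#include <cassert>
#include <cstddef>
#include <optional>
#include <vector>

struct ArgModel {
  bool is_pack = false;            // true if this slot is a parameter pack
  std::vector<int> pack_elements;  // stand-in for the pack's contents
  int value = 0;                   // stand-in for a non-pack argument
};

// Count arguments the way GetNumTemplateArguments(..., expand_pack) does:
// an expanded trailing pack contributes pack_size() elements instead of 1.
std::size_t num_arguments(const std::vector<ArgModel> &args, bool expand_pack) {
  std::size_t n = args.size();
  if (expand_pack && n && args.back().is_pack)
    n += args.back().pack_elements.size() - 1;
  return n;
}

// Map a flat index onto either a leading argument or an element of the
// trailing pack, mirroring GetNthTemplateArgument().
std::optional<int> nth_argument(const std::vector<ArgModel> &args,
                                std::size_t idx, bool expand_pack) {
  if (args.empty())
    return std::nullopt;
  const std::size_t last = args.size() - 1;
  if (idx < last)
    return args[idx].value;                 // plain leading argument
  if (!expand_pack || !args[last].is_pack)
    return idx < args.size() ? std::optional<int>(args[last].value)
                             : std::nullopt;
  const std::size_t pack_idx = idx - last;  // index into the expanded pack
  if (pack_idx >= args[last].pack_elements.size())
    return std::nullopt;
  return args[last].pack_elements[pack_idx];
}

int main() {
  // One leading argument plus a two-element pack in this toy model.
  std::vector<ArgModel> args = {{false, {}, 1}, {true, {2, 3}, 0}};
  assert(num_arguments(args, /*expand_pack=*/false) == 2);
  assert(num_arguments(args, /*expand_pack=*/true) == 3);
  assert(*nth_argument(args, 2, /*expand_pack=*/true) == 3);
  return 0;
}
```

This is why SBType::GetNumberOfTemplateArguments() can now report, say, three arguments for a specialization whose underlying clang representation stores a single trailing pack: the flat index is simply rebased onto the pack's elements.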