author     Dimitry Andric <dim@FreeBSD.org>  2021-12-25 22:36:56 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2022-05-14 11:44:01 +0000
commit     0eae32dcef82f6f06de6419a0d623d7def0cc8f6 (patch)
tree       55b7e05be47b835fd137915bee1e64026c35e71c /contrib/llvm-project/clang/lib
parent     4824e7fd18a1223177218d4aec1b3c6c5c4a444e (diff)
parent     77fc4c146f0870ffb09c1afb823ccbe742c5e6ff (diff)
Merge llvm-project main llvmorg-14-init-13186-g0c553cc1af2e
This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvmorg-14-init-13186-g0c553cc1af2e.

PR:             261742
MFC after:      2 weeks
Diffstat (limited to 'contrib/llvm-project/clang/lib')
-rw-r--r-- contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ASTContext.cpp | 106
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp | 72
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ASTImporter.cpp | 51
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp | 5
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp | 18
-rw-r--r-- contrib/llvm-project/clang/lib/AST/AttrImpl.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/AST/Comment.cpp | 7
-rw-r--r-- contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp | 15
-rw-r--r-- contrib/llvm-project/clang/lib/AST/Decl.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/AST/DeclarationName.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/AST/Expr.cpp | 17
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ExprConstant.cpp | 98
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp | 37
-rw-r--r-- contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp | 17
-rw-r--r-- contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/AST/ParentMap.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp | 7
-rw-r--r-- contrib/llvm-project/clang/lib/AST/StmtProfile.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/AST/Type.cpp | 62
-rw-r--r-- contrib/llvm-project/clang/lib/AST/TypePrinter.cpp | 32
-rw-r--r-- contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h | 1
-rw-r--r-- contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Analysis/CFG.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp | 129
-rw-r--r-- contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp | 17
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Cuda.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp | 11
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/ARC.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp | 13
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/ARM.h | 6
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp | 9
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/Mips.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp | 73
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h | 48
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp | 5
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/PPC.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h | 7
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/X86.h | 9
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Targets/XCore.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Basic/Version.cpp | 6
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/Address.h | 64
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp | 22
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp | 19
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h | 46
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp | 199
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp | 57
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGCall.h | 3
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp | 50
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h | 11
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp | 19
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGException.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp | 80
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp | 11
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp | 18
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp | 7
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp | 64
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp | 33
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp | 5
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp | 106
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h | 26
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 39
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h | 5
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp | 76
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CGValue.h | 76
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp | 71
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h | 34
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp | 54
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h | 3
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp | 11
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp | 241
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/Driver.cpp | 170
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/Job.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChain.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 11
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp | 23
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h | 3
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp | 22
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp | 125
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h | 7
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp | 56
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp (renamed from contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp) | 198
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h (renamed from contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h) | 38
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp | 292
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h | 103
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp | 167
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h | 35
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp | 134
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp | 17
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp | 23
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp | 25
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h | 33
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp | 37
-rw-r--r-- contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Format/BreakableToken.cpp | 11
-rw-r--r-- contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp | 69
-rw-r--r-- contrib/llvm-project/clang/lib/Format/Format.cpp | 24
-rw-r--r-- contrib/llvm-project/clang/lib/Format/FormatToken.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Format/FormatToken.h | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp | 17
-rw-r--r-- contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp | 6
-rw-r--r-- contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp | 193
-rw-r--r-- contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp | 53
-rw-r--r-- contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp | 192
-rw-r--r-- contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp | 5
-rw-r--r-- contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp | 15
-rw-r--r-- contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h | 184
-rw-r--r-- contrib/llvm-project/clang/lib/Headers/hexagon_protos.h | 11
-rw-r--r-- contrib/llvm-project/clang/lib/Headers/hexagon_types.h | 32
-rw-r--r-- contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h | 1609
-rw-r--r-- contrib/llvm-project/clang/lib/Headers/opencl-c.h | 670
-rw-r--r-- contrib/llvm-project/clang/lib/Headers/unwind.h | 3
-rw-r--r-- contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp | 16
-rw-r--r-- contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp | 11
-rw-r--r-- contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp | 33
-rw-r--r-- contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp | 6
-rw-r--r-- contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td | 28
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/Sema.cpp | 4
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp | 252
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp | 1
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp | 242
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp | 73
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp | 83
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp | 230
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp | 31
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaModule.cpp | 43
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp | 379
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp | 16
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/SemaType.cpp | 51
-rw-r--r-- contrib/llvm-project/clang/lib/Sema/TreeTransform.h | 98
-rw-r--r-- contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp | 15
-rw-r--r-- contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp | 12
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp | 3
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp | 21
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp | 10
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h | 2
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp | 9
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp | 24
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 8
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 243
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp | 22
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 44
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp | 9
-rw-r--r-- contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 47
-rw-r--r-- contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp | 197
-rw-r--r-- contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp | 14
-rw-r--r-- contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp | 2
-rw-r--r-- contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp | 2
210 files changed, 6399 insertions, 3171 deletions
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
index 4851c434d765..68ee7c59270e 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
@@ -162,9 +162,7 @@ static bool HasARCRuntime(CompilerInvocation &origCI) {
return triple.getOSMajorVersion() >= 11;
if (triple.getOS() == llvm::Triple::MacOSX) {
- unsigned Major, Minor, Micro;
- triple.getOSVersion(Major, Minor, Micro);
- return Major > 10 || (Major == 10 && Minor >= 7);
+ return triple.getOSVersion() >= VersionTuple(10, 7);
}
return false;
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index d72ed2af1491..701260881a93 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -2286,8 +2286,8 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = toBits(Layout.getAlignment());
break;
}
- case Type::ExtInt: {
- const auto *EIT = cast<ExtIntType>(T);
+ case Type::BitInt: {
+ const auto *EIT = cast<BitIntType>(T);
Align =
std::min(static_cast<unsigned>(std::max(
getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
@@ -2349,6 +2349,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::ObjCTypeParam:
return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
+ case Type::Using:
+ return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
+
case Type::Typedef: {
const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
@@ -3569,8 +3572,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
llvm_unreachable("type should never be variably-modified");
// These types can be variably-modified but should never need to
@@ -4482,34 +4485,34 @@ QualType ASTContext::getWritePipeType(QualType T) const {
return getPipeType(T, false);
}
-QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
+QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
llvm::FoldingSetNodeID ID;
- ExtIntType::Profile(ID, IsUnsigned, NumBits);
+ BitIntType::Profile(ID, IsUnsigned, NumBits);
void *InsertPos = nullptr;
- if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(EIT, 0);
- auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
- ExtIntTypes.InsertNode(New, InsertPos);
+ auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits);
+ BitIntTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
}
-QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
+QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
Expr *NumBitsExpr) const {
assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
llvm::FoldingSetNodeID ID;
- DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
+ DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
void *InsertPos = nullptr;
- if (DependentExtIntType *Existing =
- DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (DependentBitIntType *Existing =
+ DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(Existing, 0);
auto *New = new (*this, TypeAlignment)
- DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
- DependentExtIntTypes.InsertNode(New, InsertPos);
+ DependentBitIntType(*this, IsUnsigned, NumBitsExpr);
+ DependentBitIntTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -4568,9 +4571,7 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
assert(Enum->isFirstDecl() && "enum has previous declaration");
return getEnumType(Enum);
} else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
- Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
+ return getUnresolvedUsingType(Using);
} else
llvm_unreachable("TypeDecl without a type?");
@@ -4593,6 +4594,27 @@ QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
return QualType(newType, 0);
}
+QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
+ QualType Underlying) const {
+ llvm::FoldingSetNodeID ID;
+ UsingType::Profile(ID, Found);
+
+ void *InsertPos = nullptr;
+ UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ assert(!Underlying.hasLocalQualifiers());
+ assert(Underlying == getTypeDeclType(cast<TypeDecl>(Found->getTargetDecl())));
+ QualType Canon = Underlying.getCanonicalType();
+
+ UsingType *NewType =
+ new (*this, TypeAlignment) UsingType(Found, Underlying, Canon);
+ Types.push_back(NewType);
+ UsingTypes.InsertNode(NewType, InsertPos);
+ return QualType(NewType, 0);
+}
+
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
@@ -4619,6 +4641,22 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
return QualType(newType, 0);
}
+QualType ASTContext::getUnresolvedUsingType(
+ const UnresolvedUsingTypenameDecl *Decl) const {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
+ Decl->getCanonicalDecl())
+ if (CanonicalDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
+
+ Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
QualType ASTContext::getAttributedType(attr::Kind attrKind,
QualType modifiedType,
QualType equivalentType) {
@@ -6444,7 +6482,7 @@ unsigned ASTContext::getIntegerRank(const Type *T) const {
// Results in this 'losing' to any type of the same size, but winning if
// larger.
- if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ if (const auto *EIT = dyn_cast<BitIntType>(T))
return 0 + (EIT->getNumBits() << 3);
switch (cast<BuiltinType>(T)->getKind()) {
@@ -7885,7 +7923,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
@@ -9232,7 +9270,7 @@ void getIntersectionOfProtocols(ASTContext &Context,
// Remove any implied protocols from the list of inherited protocols.
if (!ImpliedProtocols.empty()) {
llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool {
- return ImpliedProtocols.count(proto) > 0;
+ return ImpliedProtocols.contains(proto);
});
}
@@ -10099,12 +10137,12 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
return {};
- case Type::ExtInt: {
- // Merge two ext-int types, while trying to preserve typedef info.
- bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
- bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
- unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
- unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();
+ case Type::BitInt: {
+ // Merge two bit-precise int types, while trying to preserve typedef info.
+ bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
+ bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
+ unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
+ unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();
// Like unsigned/int, shouldn't have a type if they don't match.
if (LHSUnsigned != RHSUnsigned)
@@ -10254,7 +10292,7 @@ unsigned ASTContext::getIntWidth(QualType T) const {
T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
- if(const auto *EIT = T->getAs<ExtIntType>())
+ if (const auto *EIT = T->getAs<BitIntType>())
return EIT->getNumBits();
// For builtin types, just use the standard type sizing method
return (unsigned)getTypeSize(T);
@@ -10269,9 +10307,9 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
VTy->getNumElements(), VTy->getVectorKind());
- // For _ExtInt, return an unsigned _ExtInt with same width.
- if (const auto *EITy = T->getAs<ExtIntType>())
- return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits());
+ // For _BitInt, return an unsigned _BitInt with same width.
+ if (const auto *EITy = T->getAs<BitIntType>())
+ return getBitIntType(/*IsUnsigned=*/true, EITy->getNumBits());
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
@@ -10337,9 +10375,9 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
VTy->getNumElements(), VTy->getVectorKind());
- // For _ExtInt, return a signed _ExtInt with same width.
- if (const auto *EITy = T->getAs<ExtIntType>())
- return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits());
+ // For _BitInt, return a signed _BitInt with same width.
+ if (const auto *EITy = T->getAs<BitIntType>())
+ return getBitIntType(/*IsUnsigned=*/false, EITy->getNumBits());
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
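The getUsingType() addition above gives Clang a dedicated sugar node for names found through using-declarations. A minimal sketch of what it models (illustrative code, not part of this patch):

    namespace a { struct X {}; }
    using a::X;   // introduces a UsingShadowDecl for 'X'
    X x;          // 'X' is represented as a UsingType whose found decl is that
                  // shadow decl; it desugars to the underlying type 'a::X'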
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
index 7e435e8b35b8..724ede272fbf 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
@@ -26,7 +26,8 @@ using namespace clang;
// Returns a desugared version of the QualType, and marks ShouldAKA as true
// whenever we remove significant sugar from the type.
-static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
+QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
+ bool &ShouldAKA) {
QualifierCollector QC;
while (true) {
@@ -37,6 +38,11 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
QT = ET->desugar();
continue;
}
+ // ... or a using type ...
+ if (const UsingType *UT = dyn_cast<UsingType>(Ty)) {
+ QT = UT->desugar();
+ continue;
+ }
// ... or a paren type ...
if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
QT = PT->desugar();
@@ -76,7 +82,7 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
if (const FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
bool DesugarReturn = false;
QualType SugarRT = FT->getReturnType();
- QualType RT = Desugar(Context, SugarRT, DesugarReturn);
+ QualType RT = desugarForDiagnostic(Context, SugarRT, DesugarReturn);
if (auto nullability = AttributedType::stripOuterNullability(SugarRT)) {
RT = Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*nullability), RT, RT);
@@ -87,7 +93,7 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT);
if (FPT) {
for (QualType SugarPT : FPT->param_types()) {
- QualType PT = Desugar(Context, SugarPT, DesugarArgument);
+ QualType PT = desugarForDiagnostic(Context, SugarPT, DesugarArgument);
if (auto nullability =
AttributedType::stripOuterNullability(SugarPT)) {
PT = Context.getAttributedType(
@@ -115,7 +121,8 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
for (unsigned I = 0, N = TST->getNumArgs(); I != N; ++I) {
const TemplateArgument &Arg = TST->getArg(I);
if (Arg.getKind() == TemplateArgument::Type)
- Args.push_back(Desugar(Context, Arg.getAsType(), DesugarArgument));
+ Args.push_back(desugarForDiagnostic(Context, Arg.getAsType(),
+ DesugarArgument));
else
Args.push_back(Arg);
}
@@ -129,6 +136,29 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
}
}
+ if (const auto *AT = dyn_cast<ArrayType>(Ty)) {
+ QualType ElementTy =
+ desugarForDiagnostic(Context, AT->getElementType(), ShouldAKA);
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
+ QT = Context.getConstantArrayType(
+ ElementTy, CAT->getSize(), CAT->getSizeExpr(),
+ CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers());
+ else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
+ QT = Context.getVariableArrayType(
+ ElementTy, VAT->getSizeExpr(), VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange());
+ else if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(AT))
+ QT = Context.getDependentSizedArrayType(
+ ElementTy, DSAT->getSizeExpr(), DSAT->getSizeModifier(),
+ DSAT->getIndexTypeCVRQualifiers(), DSAT->getBracketsRange());
+ else if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT))
+ QT = Context.getIncompleteArrayType(ElementTy, IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers());
+ else
+ llvm_unreachable("Unhandled array type");
+ break;
+ }
+
// Don't desugar magic Objective-C types.
if (QualType(Ty,0) == Context.getObjCIdType() ||
QualType(Ty,0) == Context.getObjCClassType() ||
@@ -181,24 +211,25 @@ break; \
// If we have a pointer-like type, desugar the pointee as well.
// FIXME: Handle other pointer-like types.
if (const PointerType *Ty = QT->getAs<PointerType>()) {
- QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getPointerType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const auto *Ty = QT->getAs<ObjCObjectPointerType>()) {
- QT = Context.getObjCObjectPointerType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getObjCObjectPointerType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
- QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getLValueReferenceType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
- QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getRValueReferenceType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const auto *Ty = QT->getAs<ObjCObjectType>()) {
if (Ty->getBaseType().getTypePtr() != Ty && !ShouldAKA) {
- QualType BaseType = Desugar(Context, Ty->getBaseType(), ShouldAKA);
- QT = Context.getObjCObjectType(BaseType, Ty->getTypeArgsAsWritten(),
- llvm::makeArrayRef(Ty->qual_begin(),
- Ty->getNumProtocols()),
- Ty->isKindOfTypeAsWritten());
+ QualType BaseType =
+ desugarForDiagnostic(Context, Ty->getBaseType(), ShouldAKA);
+ QT = Context.getObjCObjectType(
+ BaseType, Ty->getTypeArgsAsWritten(),
+ llvm::makeArrayRef(Ty->qual_begin(), Ty->getNumProtocols()),
+ Ty->isKindOfTypeAsWritten());
}
}
@@ -251,7 +282,8 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
continue; // Same canonical types
std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
bool ShouldAKA = false;
- QualType CompareDesugar = Desugar(Context, CompareTy, ShouldAKA);
+ QualType CompareDesugar =
+ desugarForDiagnostic(Context, CompareTy, ShouldAKA);
std::string CompareDesugarStr =
CompareDesugar.getAsString(Context.getPrintingPolicy());
if (CompareS != S && CompareDesugarStr != S)
@@ -286,7 +318,7 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
// sugar gives us something "significantly different".
if (!Repeated) {
bool ShouldAKA = false;
- QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
+ QualType DesugaredTy = desugarForDiagnostic(Context, Ty, ShouldAKA);
if (ShouldAKA || ForceAKA) {
if (DesugaredTy == Ty) {
DesugaredTy = Ty.getCanonicalType();
@@ -308,7 +340,7 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
OS << "'" << S << "' (vector of " << VTy->getNumElements() << " '"
<< VTy->getElementType().getAsString(Context.getPrintingPolicy())
<< "' " << Values << ")";
- return OS.str();
+ return DecoratedString;
}
}
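With the new array case, desugarForDiagnostic can now strip sugar inside array types as well, so diagnostics can print an "aka" form for arrays of sugared element types. An illustrative sketch (the exact diagnostic wording may differ):

    typedef int T;
    T arr[4];
    // a type mismatch involving 'arr' can now be reported as
    // 'T[4]' (aka 'int[4]') instead of leaving the element type opaque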
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index 710e40bbb4b7..7f78da10e0b3 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -354,6 +354,7 @@ namespace clang {
ExpectedType VisitTypeOfExprType(const TypeOfExprType *T);
// FIXME: DependentTypeOfExprType
ExpectedType VisitTypeOfType(const TypeOfType *T);
+ ExpectedType VisitUsingType(const UsingType *T);
ExpectedType VisitDecltypeType(const DecltypeType *T);
ExpectedType VisitUnaryTransformType(const UnaryTransformType *T);
ExpectedType VisitAutoType(const AutoType *T);
@@ -1340,6 +1341,17 @@ ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr);
}
+ExpectedType ASTNodeImporter::VisitUsingType(const UsingType *T) {
+ Expected<UsingShadowDecl *> FoundOrErr = import(T->getFoundDecl());
+ if (!FoundOrErr)
+ return FoundOrErr.takeError();
+ Expected<QualType> UnderlyingOrErr = import(T->getUnderlyingType());
+ if (!UnderlyingOrErr)
+ return UnderlyingOrErr.takeError();
+
+ return Importer.getToContext().getUsingType(*FoundOrErr, *UnderlyingOrErr);
+}
+
ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
// FIXME: Make sure that the "to" context supports C++0x!
ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr());
@@ -6066,20 +6078,24 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl()))
return std::move(Err);
- // Template parameters of the ClassTemplateDecl and FunctionTemplateDecl are
- // shared, if the FunctionTemplateDecl is a deduction guide for the class.
- // At import the ClassTemplateDecl object is always created first (FIXME: is
- // this really true?) because the dependency, then the FunctionTemplateDecl.
- // The DeclContext of the template parameters is changed when the
- // FunctionTemplateDecl is created, but was set already when the class
- // template was created. So here it is not the TU (default value) any more.
- // FIXME: The DeclContext of the parameters is now set finally to the
- // CXXDeductionGuideDecl object that was imported later. This may not be the
- // same that is in the original AST, specially if there are multiple deduction
- // guides.
- DeclContext *OldParamDC = nullptr;
- if (Params->size() > 0)
- OldParamDC = Params->getParam(0)->getDeclContext();
+ // At creation of the template its template parameters are "adopted"
+ // (their DeclContext is changed). After any such change the lookup table
+ // must be updated.
+ // For deduction guides the DeclContext of the template parameters may
+ // differ from what we would expect: it may be the class template, or a
+ // different CXXDeductionGuideDecl. This is because template parameter
+ // objects can be shared between deduction guides and the class template,
+ // and when multiple FunctionTemplateDecl objects (for deduction guides)
+ // are created the same parameters are re-used. The "adoption" happens
+ // multiple times with different parents, even recursively for
+ // TemplateTemplateParmDecl. The same happens at import when the
+ // FunctionTemplateDecl objects are created, but in a different order.
+ // Consequently, the DeclContext of these template parameters is not
+ // necessarily the same as in the "from" context.
+ SmallVector<DeclContext *, 2> OldParamDC;
+ OldParamDC.reserve(Params->size());
+ llvm::transform(*Params, std::back_inserter(OldParamDC),
+ [](NamedDecl *ND) { return ND->getDeclContext(); });
FunctionTemplateDecl *ToFunc;
if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name,
@@ -6091,7 +6107,12 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
ToFunc->setAccess(D->getAccess());
ToFunc->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToFunc);
- updateLookupTableForTemplateParameters(*Params, OldParamDC);
+
+ ASTImporterLookupTable *LT = Importer.SharedState->getLookupTable();
+ if (LT && !OldParamDC.empty()) {
+ for (unsigned int I = 0; I < OldParamDC.size(); ++I)
+ LT->updateForced(Params->getParam(I), OldParamDC[I]);
+ }
if (FoundByLookup) {
auto *Recent =
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
index ef42561c6f94..b7d17a5e92d0 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -140,6 +140,11 @@ void ASTImporterLookupTable::update(NamedDecl *ND, DeclContext *OldDC) {
add(ND);
}
+void ASTImporterLookupTable::updateForced(NamedDecl *ND, DeclContext *OldDC) {
+ LookupTable[OldDC][ND->getDeclName()].remove(ND);
+ add(ND);
+}
+
ASTImporterLookupTable::LookupResult
ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const {
auto DCI = LookupTable.find(DC->getPrimaryContext());
diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
index 7fd24e2aa9ad..0813a5204a5e 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -945,6 +945,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::Using:
+ if (!IsStructurallyEquivalent(Context, cast<UsingType>(T1)->getFoundDecl(),
+ cast<UsingType>(T2)->getFoundDecl()))
+ return false;
+ break;
+
case Type::Typedef:
if (!IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->getDecl(),
cast<TypedefType>(T2)->getDecl()))
@@ -1205,18 +1211,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<PipeType>(T2)->getElementType()))
return false;
break;
- case Type::ExtInt: {
- const auto *Int1 = cast<ExtIntType>(T1);
- const auto *Int2 = cast<ExtIntType>(T2);
+ case Type::BitInt: {
+ const auto *Int1 = cast<BitIntType>(T1);
+ const auto *Int2 = cast<BitIntType>(T2);
if (Int1->isUnsigned() != Int2->isUnsigned() ||
Int1->getNumBits() != Int2->getNumBits())
return false;
break;
}
- case Type::DependentExtInt: {
- const auto *Int1 = cast<DependentExtIntType>(T1);
- const auto *Int2 = cast<DependentExtIntType>(T2);
+ case Type::DependentBitInt: {
+ const auto *Int1 = cast<DependentBitIntType>(T1);
+ const auto *Int2 = cast<DependentBitIntType>(T2);
if (Int1->isUnsigned() != Int2->isUnsigned() ||
!IsStructurallyEquivalent(Context, Int1->getNumBitsExpr(),
diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
index a3b46752c511..c2f13cf63830 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
@@ -60,7 +60,7 @@ std::string LoopHintAttr::getValueString(const PrintingPolicy &Policy) const {
else
OS << "disable";
OS << ")";
- return OS.str();
+ return ValueName;
}
// Return a string suitable for identifying this attribute in diagnostics.
diff --git a/contrib/llvm-project/clang/lib/AST/Comment.cpp b/contrib/llvm-project/clang/lib/AST/Comment.cpp
index fae3640d5ff7..43820fc566e4 100644
--- a/contrib/llvm-project/clang/lib/AST/Comment.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Comment.cpp
@@ -108,12 +108,7 @@ Comment::child_iterator Comment::child_end() const {
}
bool TextComment::isWhitespaceNoCache() const {
- for (StringRef::const_iterator I = Text.begin(), E = Text.end();
- I != E; ++I) {
- if (!clang::isWhitespace(*I))
- return false;
- }
- return true;
+ return llvm::all_of(Text, clang::isWhitespace);
}
bool ParagraphComment::isWhitespaceNoCache() const {
diff --git a/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp
index 2a5f7452b776..bf9e17993497 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp
@@ -8,15 +8,12 @@
#include "clang/AST/CommentBriefParser.h"
#include "clang/AST/CommentCommandTraits.h"
+#include "clang/Basic/CharInfo.h"
namespace clang {
namespace comments {
namespace {
-inline bool isWhitespace(char C) {
- return C == ' ' || C == '\n' || C == '\r' ||
- C == '\t' || C == '\f' || C == '\v';
-}
/// Convert all whitespace into spaces, remove leading and trailing spaces,
/// compress multiple spaces into one.
@@ -26,12 +23,11 @@ void cleanupBrief(std::string &S) {
for (std::string::iterator I = S.begin(), E = S.end();
I != E; ++I) {
const char C = *I;
- if (isWhitespace(C)) {
+ if (clang::isWhitespace(C)) {
if (!PrevWasSpace) {
*O++ = ' ';
PrevWasSpace = true;
}
- continue;
} else {
*O++ = C;
PrevWasSpace = false;
@@ -44,12 +40,7 @@ void cleanupBrief(std::string &S) {
}
bool isWhitespace(StringRef Text) {
- for (StringRef::const_iterator I = Text.begin(), E = Text.end();
- I != E; ++I) {
- if (!isWhitespace(*I))
- return false;
- }
- return true;
+ return llvm::all_of(Text, clang::isWhitespace);
}
} // unnamed namespace
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 8291e721553f..94a644cbe8e5 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -604,8 +604,14 @@ static LinkageInfo getExternalLinkageFor(const NamedDecl *D) {
// - A name declared at namespace scope that does not have internal linkage
// by the previous rules and that is introduced by a non-exported
// declaration has module linkage.
- if (isInModulePurview(D) && !isExportedFromModuleInterfaceUnit(
- cast<NamedDecl>(D->getCanonicalDecl())))
+ //
+ // [basic.namespace.general]/p2
+ // A namespace is never attached to a named module and never has a name with
+ // module linkage.
+ if (isInModulePurview(D) &&
+ !isExportedFromModuleInterfaceUnit(
+ cast<NamedDecl>(D->getCanonicalDecl())) &&
+ !isa<NamespaceDecl>(D))
return LinkageInfo(ModuleLinkage, DefaultVisibility, false);
return LinkageInfo::external();
@@ -1583,7 +1589,7 @@ std::string NamedDecl::getQualifiedNameAsString() const {
std::string QualName;
llvm::raw_string_ostream OS(QualName);
printQualifiedName(OS, getASTContext().getPrintingPolicy());
- return OS.str();
+ return QualName;
}
void NamedDecl::printQualifiedName(raw_ostream &OS) const {
diff --git a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
index 56cf4b457a48..b2232ddfced3 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
@@ -236,7 +236,7 @@ std::string DeclarationName::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << *this;
- return OS.str();
+ return Result;
}
void *DeclarationName::getFETokenInfoSlow() const {
@@ -460,7 +460,7 @@ std::string DeclarationNameInfo::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << *this;
- return OS.str();
+ return Result;
}
raw_ostream &clang::operator<<(raw_ostream &OS, DeclarationNameInfo DNInfo) {
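DeclarationName.cpp picks up the same mechanical change as ASTDiagnostic.cpp, AttrImpl.cpp, Decl.cpp and OpenMPClause.cpp: returning the std::string that backs a raw_string_ostream instead of OS.str(). This relies on raw_string_ostream writing through to its target string without buffering (the default behavior in the LLVM version being merged), so the string is always current and returning it directly enables NRVO. A sketch of the pattern:

    std::string Result;
    llvm::raw_string_ostream OS(Result);
    OS << *this;    // writes through to Result immediately; no flush needed
    return Result;  // previously 'return OS.str();'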
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index d3cb2ff3734c..2530beb89d17 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -202,6 +202,23 @@ bool Expr::isKnownToHaveBooleanValue(bool Semantic) const {
return false;
}
+const ValueDecl *
+Expr::getAsBuiltinConstantDeclRef(const ASTContext &Context) const {
+ Expr::EvalResult Eval;
+
+ if (EvaluateAsConstantExpr(Eval, Context)) {
+ APValue &Value = Eval.Val;
+
+ if (Value.isMemberPointer())
+ return Value.getMemberPointerDecl();
+
+ if (Value.isLValue() && Value.getLValueOffset().isZero())
+ return Value.getLValueBase().dyn_cast<const ValueDecl *>();
+ }
+
+ return nullptr;
+}
+
// Amusing macro metaprogramming hack: check whether a class provides
// a more specific implementation of getExprLoc().
//
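The new getAsBuiltinConstantDeclRef() helper lets a caller ask whether an expression constant-evaluates to a reference to a single declaration (a zero-offset lvalue base or a member pointer). Elsewhere in this merge it supports builtins that need exactly that, such as __builtin_function_start; a hedged usage sketch:

    void f(int);
    // The argument must resolve to a declaration at compile time:
    void *entry = __builtin_function_start(f);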
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index 2e458e2c659c..6f979d3264bb 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -1954,11 +1954,12 @@ static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
return true;
}
-/// Should this call expression be treated as a string literal?
-static bool IsStringLiteralCall(const CallExpr *E) {
+/// Should this call expression be treated as a constant?
+static bool IsConstantCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
- Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+ Builtin == Builtin::BI__builtin___NSStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin_function_start);
}
static bool IsGlobalLValue(APValue::LValueBase B) {
@@ -2004,7 +2005,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
case Expr::ObjCBoxedExprClass:
return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
case Expr::CallExprClass:
- return IsStringLiteralCall(cast<CallExpr>(E));
+ return IsConstantCall(cast<CallExpr>(E));
// For GCC compatibility, &&label has static storage duration.
case Expr::AddrLabelExprClass:
return true;
@@ -2931,6 +2932,11 @@ handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode,
break;
}
+ // The boolean operations on these vector types use an instruction that
+ // results in a mask of '-1' for the 'truth' value. Ensure that we negate 1
+ // to -1 to make sure that we produce the correct value.
+ Result.negate();
+
return true;
}
@@ -8965,7 +8971,7 @@ bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
}
bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
- if (IsStringLiteralCall(E))
+ if (IsConstantCall(E))
return Success(E);
if (unsigned BuiltinOp = E->getBuiltinCallee())
@@ -10182,7 +10188,8 @@ namespace {
bool VisitInitListExpr(const InitListExpr *E);
bool VisitUnaryImag(const UnaryOperator *E);
bool VisitBinaryOperator(const BinaryOperator *E);
- // FIXME: Missing: unary -, unary ~, conditional operator (for GNU
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ // FIXME: Missing: conditional operator (for GNU
// conditional select), shufflevector, ExtVectorElementExpr
};
} // end anonymous namespace
@@ -10367,6 +10374,83 @@ bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return Success(LHSValue, E);
}
+static llvm::Optional<APValue> handleVectorUnaryOperator(ASTContext &Ctx,
+ QualType ResultTy,
+ UnaryOperatorKind Op,
+ APValue Elt) {
+ switch (Op) {
+ case UO_Plus:
+ // Nothing to do here.
+ return Elt;
+ case UO_Minus:
+ if (Elt.getKind() == APValue::Int) {
+ Elt.getInt().negate();
+ } else {
+ assert(Elt.getKind() == APValue::Float &&
+ "Vector can only be int or float type");
+ Elt.getFloat().changeSign();
+ }
+ return Elt;
+ case UO_Not:
+ // This is only valid for integral types anyway, so we don't have to handle
+ // float here.
+ assert(Elt.getKind() == APValue::Int &&
+ "Vector operator ~ can only be int");
+ Elt.getInt().flipAllBits();
+ return Elt;
+ case UO_LNot: {
+ if (Elt.getKind() == APValue::Int) {
+ Elt.getInt() = !Elt.getInt();
+ // operator ! on vectors returns -1 for 'truth', so negate it.
+ Elt.getInt().negate();
+ return Elt;
+ }
+ assert(Elt.getKind() == APValue::Float &&
+ "Vector can only be int or float type");
+ // Float types result in an int of the same size, but -1 for true, or 0 for
+ // false.
+ APSInt EltResult{Ctx.getIntWidth(ResultTy),
+ ResultTy->isUnsignedIntegerType()};
+ if (Elt.getFloat().isZero())
+ EltResult.setAllBits();
+ else
+ EltResult.clearAllBits();
+
+ return APValue{EltResult};
+ }
+ default:
+ // FIXME: Implement the rest of the unary operators.
+ return llvm::None;
+ }
+}
+
+bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ Expr *SubExpr = E->getSubExpr();
+ const auto *VD = SubExpr->getType()->castAs<VectorType>();
+ // This result element type differs in the case of negating a floating point
+ // vector, since the result type is the a vector of the equivilant sized
+ // integer.
+ const QualType ResultEltTy = VD->getElementType();
+ UnaryOperatorKind Op = E->getOpcode();
+
+ APValue SubExprValue;
+ if (!Evaluate(SubExprValue, Info, SubExpr))
+ return false;
+
+ assert(SubExprValue.getVectorLength() == VD->getNumElements() &&
+ "Vector length doesn't match type?");
+
+ SmallVector<APValue, 4> ResultElements;
+ for (unsigned EltNum = 0; EltNum < VD->getNumElements(); ++EltNum) {
+ llvm::Optional<APValue> Elt = handleVectorUnaryOperator(
+ Info.Ctx, ResultEltTy, Op, SubExprValue.getVectorElt(EltNum));
+ if (!Elt)
+ return false;
+ ResultElements.push_back(*Elt);
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+}
+
//===----------------------------------------------------------------------===//
// Array Evaluation
//===----------------------------------------------------------------------===//
@@ -11080,7 +11164,7 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
// GCC classifies vectors as None. We follow its lead and classify all
// other types that don't fit into the regular classification the same way.
return GCCTypeClass::None;
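The VisitUnaryOperator addition makes element-wise unary operators on vectors usable in constant expressions, with the -1-for-true convention documented in the comments above. A minimal sketch of the semantics, using Clang's vector extension (illustrative):

    typedef int v4si __attribute__((vector_size(16)));
    constexpr v4si a = {0, 1, 2, 3};
    constexpr v4si neg = -a;   // {0, -1, -2, -3}
    constexpr v4si inv = ~a;   // {-1, -2, -3, -4}
    constexpr v4si lnot = !a;  // {-1, 0, 0, 0}: vector 'true' is all-ones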
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index 07579d04e275..7afc1250a36f 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -2263,8 +2263,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Atomic:
case Type::Pipe:
case Type::MacroQualified:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
llvm_unreachable("type is illegal as a nested name specifier");
case Type::SubstTemplateTypeParmPack:
@@ -2380,6 +2380,9 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
break;
}
+ case Type::Using:
+ return mangleUnresolvedTypeOrSimpleId(cast<UsingType>(Ty)->desugar(),
+ Prefix);
case Type::Elaborated:
return mangleUnresolvedTypeOrSimpleId(
cast<ElaboratedType>(Ty)->getNamedType(), Prefix);
@@ -3967,26 +3970,20 @@ void CXXNameMangler::mangleType(const PipeType *T) {
Out << "8ocl_pipe";
}
-void CXXNameMangler::mangleType(const ExtIntType *T) {
- Out << "U7_ExtInt";
- llvm::APSInt BW(32, true);
- BW = T->getNumBits();
- TemplateArgument TA(Context.getASTContext(), BW, getASTContext().IntTy);
- mangleTemplateArgs(TemplateName(), &TA, 1);
- if (T->isUnsigned())
- Out << "j";
- else
- Out << "i";
+void CXXNameMangler::mangleType(const BitIntType *T) {
+ // 5.1.5.2 Builtin types
+ // <type> ::= DB <number | instantiation-dependent expression> _
+ // ::= DU <number | instantiation-dependent expression> _
+ Out << "D" << (T->isUnsigned() ? "U" : "B") << T->getNumBits() << "_";
}
-void CXXNameMangler::mangleType(const DependentExtIntType *T) {
- Out << "U7_ExtInt";
- TemplateArgument TA(T->getNumBitsExpr());
- mangleTemplateArgs(TemplateName(), &TA, 1);
- if (T->isUnsigned())
- Out << "j";
- else
- Out << "i";
+void CXXNameMangler::mangleType(const DependentBitIntType *T) {
+ // 5.1.5.2 Builtin types
+ // <type> ::= DB <number | instantiation-dependent expression> _
+ // ::= DU <number | instantiation-dependent expression> _
+ Out << "D" << (T->isUnsigned() ? "U" : "B");
+ mangleExpression(T->getNumBitsExpr());
+ Out << "_";
}
void CXXNameMangler::mangleIntegerLiteral(QualType T,
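The replacement mangling follows the Itanium C++ ABI productions quoted in the comments (DB for signed, DU for unsigned, terminated by '_') instead of the old vendor-extension U7_ExtInt spelling. A quick sketch of the resulting symbols on an Itanium-ABI target (assumed, illustrative):

    void f(_BitInt(32));          // mangles as _Z1fDB32_
    void g(unsigned _BitInt(7));  // mangles as _Z1gDU7_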
diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
index 86879b8c3533..ae585def4d07 100644
--- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
@@ -744,11 +744,18 @@ void JSONNodeDumper::VisitNamedDecl(const NamedDecl *ND) {
JOS.attribute("name", ND->getNameAsString());
// FIXME: There are likely other contexts in which it makes no sense to ask
// for a mangled name.
- if (!isa<RequiresExprBodyDecl>(ND->getDeclContext())) {
- std::string MangledName = ASTNameGen.getName(ND);
- if (!MangledName.empty())
- JOS.attribute("mangledName", MangledName);
- }
+ if (isa<RequiresExprBodyDecl>(ND->getDeclContext()))
+ return;
+
+ // Mangled names are not meaningful for locals, and may not be well-defined
+ // in the case of VLAs.
+ auto *VD = dyn_cast<VarDecl>(ND);
+ if (VD && VD->hasLocalStorage())
+ return;
+
+ std::string MangledName = ASTNameGen.getName(ND);
+ if (!MangledName.empty())
+ JOS.attribute("mangledName", MangledName);
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
index 79a448a2435c..8802b6e500a6 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
@@ -3365,26 +3365,26 @@ void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD,
return Mangler.mangle(GD);
}
-void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers,
+void MicrosoftCXXNameMangler::mangleType(const BitIntType *T, Qualifiers,
SourceRange Range) {
llvm::SmallString<64> TemplateMangling;
llvm::raw_svector_ostream Stream(TemplateMangling);
MicrosoftCXXNameMangler Extra(Context, Stream);
Stream << "?$";
if (T->isUnsigned())
- Extra.mangleSourceName("_UExtInt");
+ Extra.mangleSourceName("_UBitInt");
else
- Extra.mangleSourceName("_ExtInt");
+ Extra.mangleSourceName("_BitInt");
Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits()));
mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
}
-void MicrosoftCXXNameMangler::mangleType(const DependentExtIntType *T,
+void MicrosoftCXXNameMangler::mangleType(const DependentBitIntType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "cannot mangle this DependentExtInt type yet");
+ DiagnosticsEngine::Error, "cannot mangle this DependentBitInt type yet");
Diags.Report(Range.getBegin(), DiagID) << Range;
}
diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
index f721e56f7fdd..1bd049b88005 100644
--- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
@@ -126,6 +126,7 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -217,6 +218,7 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -1792,6 +1794,10 @@ void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
OS << "capture";
}
+void OMPClausePrinter::VisitOMPCompareClause(OMPCompareClause *) {
+ OS << "compare";
+}
+
void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
OS << "seq_cst";
}
@@ -2454,7 +2460,7 @@ std::string OMPTraitInfo::getMangledName() const {
Property.RawString);
}
}
- return OS.str();
+ return MangledName;
}
OMPTraitInfo::OMPTraitInfo(StringRef MangledName) {
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
index 2ff5c9d8aeb5..da21e573c320 100644
--- a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
@@ -133,8 +133,7 @@ void ParentMap::setParent(const Stmt *S, const Stmt *Parent) {
Stmt* ParentMap::getParent(Stmt* S) const {
MapTy* M = (MapTy*) Impl;
- MapTy::iterator I = M->find(S);
- return I == M->end() ? nullptr : I->second;
+ return M->lookup(S);
}
Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
diff --git a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
index 673821078345..561757b1ba64 100644
--- a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
+++ b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
@@ -418,6 +418,13 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
return QT;
}
+ // We don't consider the alias introduced by `using a::X` as a new type.
+ // The qualified name is still a::X.
+ if (isa<UsingType>(QT.getTypePtr())) {
+ return getFullyQualifiedType(QT.getSingleStepDesugaredType(Ctx), Ctx,
+ WithGlobalNsPrefix);
+ }
+
// Remove the part of the type related to the type being a template
// parameter (we won't report it as part of the 'type name' and it
// is actually make the code below to be more complex (to handle
diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
index 4339c249e027..09853e0f0e49 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
@@ -551,6 +551,8 @@ void OMPClauseProfiler::VisitOMPUpdateClause(const OMPUpdateClause *) {}
void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
+void OMPClauseProfiler::VisitOMPCompareClause(const OMPCompareClause *) {}
+
void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
void OMPClauseProfiler::VisitOMPAcqRelClause(const OMPAcqRelClause *) {}
diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
index b21e806e307c..67c934847c7f 100644
--- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
@@ -1534,6 +1534,10 @@ void TextNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
dumpDeclRef(T->getDecl());
}
+void TextNodeDumper::VisitUsingType(const UsingType *T) {
+ dumpDeclRef(T->getFoundDecl());
+}
+
void TextNodeDumper::VisitTypedefType(const TypedefType *T) {
dumpDeclRef(T->getDecl());
}
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index e0ac3f5b1351..c771fe264b0c 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -338,25 +338,25 @@ VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
VectorTypeBits.NumElements = nElements;
}
-ExtIntType::ExtIntType(bool IsUnsigned, unsigned NumBits)
- : Type(ExtInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned),
+BitIntType::BitIntType(bool IsUnsigned, unsigned NumBits)
+ : Type(BitInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned),
NumBits(NumBits) {}
-DependentExtIntType::DependentExtIntType(const ASTContext &Context,
+DependentBitIntType::DependentBitIntType(const ASTContext &Context,
bool IsUnsigned, Expr *NumBitsExpr)
- : Type(DependentExtInt, QualType{},
+ : Type(DependentBitInt, QualType{},
toTypeDependence(NumBitsExpr->getDependence())),
Context(Context), ExprAndUnsigned(NumBitsExpr, IsUnsigned) {}
-bool DependentExtIntType::isUnsigned() const {
+bool DependentBitIntType::isUnsigned() const {
return ExprAndUnsigned.getInt();
}
-clang::Expr *DependentExtIntType::getNumBitsExpr() const {
+clang::Expr *DependentBitIntType::getNumBitsExpr() const {
return ExprAndUnsigned.getPointer();
}
-void DependentExtIntType::Profile(llvm::FoldingSetNodeID &ID,
+void DependentBitIntType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context, bool IsUnsigned,
Expr *NumBitsExpr) {
ID.AddBoolean(IsUnsigned);
@@ -1932,7 +1932,7 @@ bool Type::isIntegralType(const ASTContext &Ctx) const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete();
- return isExtIntType();
+ return isBitIntType();
}
bool Type::isIntegralOrUnscopedEnumerationType() const {
@@ -1940,7 +1940,7 @@ bool Type::isIntegralOrUnscopedEnumerationType() const {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
- if (isExtIntType())
+ if (isBitIntType())
return true;
return isUnscopedEnumerationType();
@@ -2023,7 +2023,9 @@ bool Type::isSignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isSigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isSigned();
return false;
@@ -2040,9 +2042,10 @@ bool Type::isSignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isSigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isSigned();
-
return false;
}
@@ -2070,7 +2073,9 @@ bool Type::isUnsignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isUnsigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isUnsigned();
return false;
@@ -2087,7 +2092,9 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isUnsigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isUnsigned();
return false;
@@ -2129,7 +2136,7 @@ bool Type::isRealType() const {
BT->getKind() <= BuiltinType::Ibm128;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
- return isExtIntType();
+ return isBitIntType();
}
bool Type::isArithmeticType() const {
@@ -2145,7 +2152,7 @@ bool Type::isArithmeticType() const {
// false for scoped enumerations since that will disable any
// unwanted implicit conversions.
return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
- return isa<ComplexType>(CanonicalType) || isExtIntType();
+ return isa<ComplexType>(CanonicalType) || isBitIntType();
}
Type::ScalarTypeKind Type::getScalarTypeKind() const {
@@ -2174,7 +2181,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
if (CT->getElementType()->isRealFloatingType())
return STK_FloatingComplex;
return STK_IntegralComplex;
- } else if (isExtIntType()) {
+ } else if (isBitIntType()) {
return STK_Integral;
}
@@ -2381,7 +2388,7 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
- case Type::ExtInt:
+ case Type::BitInt:
return true;
case Type::Enum:
@@ -3400,6 +3407,17 @@ QualType TypedefType::desugar() const {
return getDecl()->getUnderlyingType();
}
+UsingType::UsingType(const UsingShadowDecl *Found, QualType Underlying,
+ QualType Canon)
+ : Type(Using, Canon, Underlying->getDependence()),
+ Found(const_cast<UsingShadowDecl *>(Found)) {
+ assert(Underlying == getUnderlyingType());
+}
+
+QualType UsingType::getUnderlyingType() const {
+ return QualType(cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(), 0);
+}
+
QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); }
QualType MacroQualifiedType::getModifiedType() const {
@@ -3849,7 +3867,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
// here in error recovery.
return CachedProperties(ExternalLinkage, false);
- case Type::ExtInt:
+ case Type::BitInt:
case Type::Builtin:
// C++ [basic.link]p8:
// A type is said to have linkage if and only if:
@@ -3949,7 +3967,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
assert(T->isInstantiationDependentType());
return LinkageInfo::external();
- case Type::ExtInt:
+ case Type::BitInt:
case Type::Builtin:
return LinkageInfo::external();
@@ -4169,8 +4187,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
return false;
}
llvm_unreachable("bad type kind!");
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index eca9af3e5f36..2a33a69f288d 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -212,6 +212,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::Builtin:
case Type::Complex:
case Type::UnresolvedUsing:
+ case Type::Using:
case Type::Typedef:
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -232,8 +233,8 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
CanPrefixQualifiers = true;
break;
@@ -1046,6 +1047,21 @@ void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
raw_ostream &OS) {}
+void TypePrinter::printUsingBefore(const UsingType *T, raw_ostream &OS) {
+ // After `namespace b { using a::X }`, is the type X within b a::X or b::X?
+ //
+ // - b::X is more formally correct given the UsingType model
+ // - b::X makes sense if "re-exporting" a symbol in a new namespace
+ // - a::X makes sense if "importing" a symbol for convenience
+ //
+ // The "importing" use seems much more common, so we print a::X.
+ // This could be a policy option, but the right choice seems to rest more
+ // with the intent of the code than the caller.
+ printTypeSpec(T->getFoundDecl()->getUnderlyingDecl(), OS);
+}
+
+void TypePrinter::printUsingAfter(const UsingType *T, raw_ostream &OS) {}
+
void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
printTypeSpec(T->getDecl(), OS);
}
@@ -1200,26 +1216,26 @@ void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {}
-void TypePrinter::printExtIntBefore(const ExtIntType *T, raw_ostream &OS) {
+void TypePrinter::printBitIntBefore(const BitIntType *T, raw_ostream &OS) {
if (T->isUnsigned())
OS << "unsigned ";
- OS << "_ExtInt(" << T->getNumBits() << ")";
+ OS << "_BitInt(" << T->getNumBits() << ")";
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printExtIntAfter(const ExtIntType *T, raw_ostream &OS) {}
+void TypePrinter::printBitIntAfter(const BitIntType *T, raw_ostream &OS) {}
-void TypePrinter::printDependentExtIntBefore(const DependentExtIntType *T,
+void TypePrinter::printDependentBitIntBefore(const DependentBitIntType *T,
raw_ostream &OS) {
if (T->isUnsigned())
OS << "unsigned ";
- OS << "_ExtInt(";
+ OS << "_BitInt(";
T->getNumBitsExpr()->printPretty(OS, nullptr, Policy);
OS << ")";
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printDependentExtIntAfter(const DependentExtIntType *T,
+void TypePrinter::printDependentBitIntAfter(const DependentBitIntType *T,
raw_ostream &OS) {}
/// Appends the given scope to the end of a string.
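
With the printer updated, bit-precise types render with the C23 spelling; for instance (expected printed forms shown in comments, assuming these declarations):

    _BitInt(12) a;                        // prints as: _BitInt(12)
    unsigned _BitInt(9) b;                // prints as: unsigned _BitInt(9)
    template <int N> void f(_BitInt(N)); // dependent form: _BitInt(N)
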
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 7680eb38283e..815058f32de4 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -1059,6 +1059,7 @@ const AstTypeMatcher<UnaryTransformType> unaryTransformType;
const AstTypeMatcher<RecordType> recordType;
const AstTypeMatcher<TagType> tagType;
const AstTypeMatcher<ElaboratedType> elaboratedType;
+const AstTypeMatcher<UsingType> usingType;
const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType;
const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index ba2f49e6b623..41ab0ed70fda 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -204,7 +204,7 @@ std::string Diagnostics::toString() const {
std::string S;
llvm::raw_string_ostream OS(S);
printToStream(OS);
- return OS.str();
+ return S;
}
void Diagnostics::printToStreamFull(llvm::raw_ostream &OS) const {
@@ -223,7 +223,7 @@ std::string Diagnostics::toStringFull() const {
std::string S;
llvm::raw_string_ostream OS(S);
printToStreamFull(OS);
- return OS.str();
+ return S;
}
} // namespace dynamic
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 783fb203c408..fa9d42247e24 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -1035,7 +1035,6 @@ public:
void getArgKinds(ASTNodeKind ThisKind, unsigned,
std::vector<ArgKind> &ArgKinds) const override {
ArgKinds.push_back(ArgKind::MakeNodeArg(ThisKind));
- return;
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity = nullptr,
ASTNodeKind *LeastDerivedKind = nullptr) const override {
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
index c6a77bb6c2e0..cab1476acf94 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -645,7 +645,7 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
Tokenizer->SkipNewlines();
{
- ScopedContextEntry SCE(this, Ctor ? *Ctor : nullptr);
+ ScopedContextEntry SCE(this, Ctor.getValueOr(nullptr));
while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
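
`llvm::Optional<T>::getValueOr` collapses the `Ctor ? *Ctor : nullptr` ternary into one call. A self-contained sketch of the semantics (`SomeInt` is a hypothetical int lvalue):

    llvm::Optional<int *> MaybeP;        // unset
    int *P = MaybeP.getValueOr(nullptr); // nullptr when empty
    MaybeP = &SomeInt;
    P = MaybeP.getValueOr(nullptr);      // now &SomeInt
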
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 878547923d27..4f3efdb0a663 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -228,6 +228,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(eachOf);
REGISTER_MATCHER(elaboratedType);
REGISTER_MATCHER(elaboratedTypeLoc);
+ REGISTER_MATCHER(usingType);
REGISTER_MATCHER(enumConstantDecl);
REGISTER_MATCHER(enumDecl);
REGISTER_MATCHER(enumType);
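
Registering `usingType` exposes the new matcher to dynamic users such as clang-query. Code it is meant to match, with a hypothetical query shown as a comment:

    namespace a { struct S {}; }
    using a::S;
    S Instance; // Instance's type is spelled through a UsingType node
    // clang-query> match varDecl(hasType(usingType()))   (illustrative)
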
diff --git a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
index d8466ac34a3d..06f1f813aeed 100644
--- a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -387,7 +387,7 @@ std::string AnalysisDeclContext::getFunctionName(const Decl *D) {
OS << ' ' << OMD->getSelector().getAsString() << ']';
}
- return OS.str();
+ return Str;
}
LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index abf65e3efce9..9ef3b5b6277a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -1820,8 +1820,6 @@ void CFGBuilder::addScopesEnd(LocalScope::const_iterator B,
for (VarDecl *VD : llvm::reverse(DeclsWithEndedScope))
appendScopeEnd(Block, VD, S);
-
- return;
}
/// addAutomaticObjDtors - Add to current block automatic objects destructors
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index bb7eb9971068..413e8d14bf0a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -11,15 +11,82 @@
//
//===----------------------------------------------------------------------===//
+#include <utility>
#include <vector>
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
-using namespace clang;
-using namespace dataflow;
+namespace clang {
+namespace dataflow {
+
+/// Computes the input state for a given basic block by joining the output
+/// states of its predecessors.
+///
+/// Requirements:
+///
+/// All predecessors of `Block` except those with loop back edges must have
+/// already been transferred. States in `BlockStates` that are set to
+/// `llvm::None` represent basic blocks that are not evaluated yet.
+static TypeErasedDataflowAnalysisState computeBlockInputState(
+ std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> &BlockStates,
+ const CFGBlock &Block, const Environment &InitEnv,
+ TypeErasedDataflowAnalysis &Analysis) {
+ // FIXME: Consider passing `Block` to `Analysis.typeErasedInitialElement()`
+ // to enable building analyses like computation of dominators that initialize
+ // the state of each basic block differently.
+ TypeErasedDataflowAnalysisState State = {Analysis.typeErasedInitialElement(),
+ InitEnv};
+ for (const CFGBlock *Pred : Block.preds()) {
+ // Skip if the predecessor is unreachable or control flow cannot get past it.
+ if (!Pred || Pred->hasNoReturnElement())
+ continue;
+
+ // Skip if `Pred` was not evaluated yet. This could happen if `Pred` has a
+ // loop back edge to `Block`.
+ const llvm::Optional<TypeErasedDataflowAnalysisState> &MaybePredState =
+ BlockStates[Pred->getBlockID()];
+ if (!MaybePredState.hasValue())
+ continue;
+
+ const TypeErasedDataflowAnalysisState &PredState =
+ MaybePredState.getValue();
+ Analysis.joinTypeErased(State.Lattice, PredState.Lattice);
+ State.Env.join(PredState.Env);
+ }
+ return State;
+}
+
+TypeErasedDataflowAnalysisState transferBlock(
+ std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> &BlockStates,
+ const CFGBlock &Block, const Environment &InitEnv,
+ TypeErasedDataflowAnalysis &Analysis,
+ std::function<void(const CFGStmt &,
+ const TypeErasedDataflowAnalysisState &)>
+ HandleTransferredStmt) {
+ TypeErasedDataflowAnalysisState State =
+ computeBlockInputState(BlockStates, Block, InitEnv, Analysis);
+ for (const CFGElement &Element : Block) {
+ // FIXME: Evaluate other kinds of `CFGElement`.
+ const llvm::Optional<CFGStmt> Stmt = Element.getAs<CFGStmt>();
+ if (!Stmt.hasValue())
+ continue;
+
+ // FIXME: Evaluate the statement contained in `Stmt`.
+
+ State.Lattice = Analysis.transferTypeErased(Stmt.getValue().getStmt(),
+ State.Lattice, State.Env);
+ if (HandleTransferredStmt != nullptr)
+ HandleTransferredStmt(Stmt.getValue(), State);
+ }
+ return State;
+}
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
runTypeErasedDataflowAnalysis(const CFG &Cfg,
@@ -29,7 +96,59 @@ runTypeErasedDataflowAnalysis(const CFG &Cfg,
// are specified in the header. This could be done by remembering
// what options were used to build `Cfg` and asserting on them here.
- // FIXME: Implement work list-based algorithm to compute the fixed
- // point of `Analysis::transform` for every basic block in `Cfg`.
- return {};
+ PostOrderCFGView POV(&Cfg);
+ ForwardDataflowWorklist Worklist(Cfg, &POV);
+
+ std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>> BlockStates;
+ BlockStates.resize(Cfg.size(), llvm::None);
+
+ // The entry basic block doesn't contain statements so it can be skipped.
+ const CFGBlock &Entry = Cfg.getEntry();
+ BlockStates[Entry.getBlockID()] = {Analysis.typeErasedInitialElement(),
+ InitEnv};
+ Worklist.enqueueSuccessors(&Entry);
+
+ // Bugs in lattices and transfer functions can prevent the analysis from
+ // converging. To limit the damage (infinite loops) that these bugs can cause,
+ // limit the number of iterations.
+ // FIXME: Consider making the maximum number of iterations configurable.
+ // FIXME: Set up statistics (see llvm/ADT/Statistic.h) to count average number
+ // of iterations, number of functions that time out, etc.
+ unsigned Iterations = 0;
+ static constexpr unsigned MaxIterations = 1 << 16;
+ while (const CFGBlock *Block = Worklist.dequeue()) {
+ if (++Iterations > MaxIterations) {
+ llvm::errs() << "Maximum number of iterations reached, giving up.\n";
+ break;
+ }
+
+ const llvm::Optional<TypeErasedDataflowAnalysisState> &OldBlockState =
+ BlockStates[Block->getBlockID()];
+ TypeErasedDataflowAnalysisState NewBlockState =
+ transferBlock(BlockStates, *Block, InitEnv, Analysis);
+
+ if (OldBlockState.hasValue() &&
+ Analysis.isEqualTypeErased(OldBlockState.getValue().Lattice,
+ NewBlockState.Lattice) &&
+ OldBlockState->Env == NewBlockState.Env) {
+ // The state of `Block` didn't change after transfer so there's no need to
+ // revisit its successors.
+ continue;
+ }
+
+ BlockStates[Block->getBlockID()] = std::move(NewBlockState);
+
+ // Do not add unreachable successor blocks to `Worklist`.
+ if (Block->hasNoReturnElement())
+ continue;
+
+ Worklist.enqueueSuccessors(Block);
+ }
+ // FIXME: Consider evaluating unreachable basic blocks (those that have a
+ // state set to `llvm::None` at this point) to also analyze dead code.
+
+ return BlockStates;
}
+
+} // namespace dataflow
+} // namespace clang
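
The stub above becomes a conventional forward-dataflow fixed point: join over predecessors, transfer through the block, and re-enqueue successors only when the block's state changed, with an iteration cap as a safety valve. The same shape as a self-contained toy (plain C++, not the clang API; join is bitwise OR and each block contributes fixed "gen" bits):

    #include <cstdio>
    #include <deque>
    #include <vector>

    int main() {
      std::vector<std::vector<int>> Succs = {{1, 2}, {3}, {1, 3}, {}}; // tiny CFG
      std::vector<unsigned> Gen = {0x1, 0x2, 0x4, 0x0}; // per-block facts
      std::vector<unsigned> State(Succs.size(), 0);
      std::deque<int> Worklist = {0}; // start at the entry block
      unsigned Iterations = 0, MaxIterations = 1u << 16; // convergence guard
      while (!Worklist.empty() && ++Iterations <= MaxIterations) {
        int B = Worklist.front();
        Worklist.pop_front();
        unsigned In = 0; // join over predecessors (found by scanning Succs)
        for (int P = 0; P < (int)Succs.size(); ++P)
          for (int S : Succs[P])
            if (S == B)
              In |= State[P];
        unsigned Out = In | Gen[B]; // transfer function
        if (Out == State[B])
          continue; // no change: don't revisit successors
        State[B] = Out;
        for (int S : Succs[B])
          Worklist.push_back(S);
      }
      for (size_t B = 0; B < State.size(); ++B)
        std::printf("block %zu: 0x%x\n", B, State[B]);
    }
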
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
index b196ffa73cbf..9cc990bd35a3 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
@@ -418,7 +418,6 @@ public:
private:
Context::Factory ContextFactory;
std::vector<VarDefinition> VarDefinitions;
- std::vector<unsigned> CtxIndices;
std::vector<std::pair<const Stmt *, Context>> SavedContexts;
public:
@@ -731,8 +730,6 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
std::vector<CFGBlockInfo> &BlockInfo) {
PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
- CtxIndices.resize(CFGraph->getNumBlockIDs());
-
for (const auto *CurrBlock : *SortedGraph) {
unsigned CurrBlockID = CurrBlock->getBlockID();
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index 67cd39728c35..a38ae34f4b81 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -591,8 +591,8 @@ public:
if (AtPredExit == MayUninitialized) {
// If the predecessor's terminator is an "asm goto" that initializes
- // the variable, then it won't be counted as "initialized" on the
- // non-fallthrough paths.
+ // the variable, then don't count it as "initialized" on the indirect
+ // paths.
CFGTerminator term = Pred->getTerminator();
if (const auto *as = dyn_cast_or_null<GCCAsmStmt>(term.getStmt())) {
const CFGBlock *fallthrough = *Pred->succ_begin();
@@ -810,13 +810,22 @@ void TransferFunctions::VisitGCCAsmStmt(GCCAsmStmt *as) {
if (!as->isAsmGoto())
return;
- for (const Expr *o : as->outputs())
- if (const VarDecl *VD = findVar(o).getDecl())
+ ASTContext &C = ac.getASTContext();
+ for (const Expr *O : as->outputs()) {
+ const Expr *Ex = stripCasts(C, O);
+
+ // Strip away any unary operators. Invalid l-values are reported by other
+ // semantic analysis passes.
+ while (const auto *UO = dyn_cast<UnaryOperator>(Ex))
+ Ex = stripCasts(C, UO->getSubExpr());
+
+ if (const VarDecl *VD = findVar(Ex).getDecl())
if (vals[VD] != Initialized)
// If the variable isn't initialized by the time we get here, then we
// mark it as potentially uninitialized for those cases where it's used
// on an indirect path, where it's not guaranteed to be defined.
vals[VD] = MayUninitialized;
+ }
}
void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
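
Outputs of an `asm goto` are only guaranteed written on the fallthrough edge, which is why the transfer function above marks them `MayUninitialized` for the indirect (label) paths; the new cast/unary-operator stripping also finds the variable behind outputs such as `"=r"(*p)`. A small illustration (GNU `asm goto` with outputs, accepted by recent Clang):

    int demo(void) {
      int x;
      asm goto("" : "=r"(x) : : : fail); // x written only on fallthrough
      return x; // initialized here
    fail:
      return x; // may be uninitialized on the indirect path
    }
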
diff --git a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
index e82a3a705e70..2d75578b3de0 100644
--- a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
@@ -123,6 +123,7 @@ static const CudaArchToStringMap arch_names[] = {
GFX(1033), // gfx1033
GFX(1034), // gfx1034
GFX(1035), // gfx1035
+ {CudaArch::Generic, "generic", ""},
// clang-format on
};
#undef SM
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
index 9e74e05bd863..1761c6d3d89b 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
@@ -163,6 +163,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -428,6 +429,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
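
`OMPC_compare` threads the OpenMP 5.1 `compare` clause for atomic conditional updates through the clause switches. The construct it enables, sketched below (this merge adds the parsing/printing plumbing; full code generation lands separately):

    void atomic_max(int *x, int e) {
    #pragma omp atomic compare
      if (*x < e) { *x = e; }
    }
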
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp
index 6986fcd322f2..6e5e55fb09ce 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp
@@ -90,7 +90,7 @@ SourceLocation::printToString(const SourceManager &SM) const {
std::string S;
llvm::raw_string_ostream OS(S);
print(OS, SM);
- return OS.str();
+ return S;
}
LLVM_DUMP_METHOD void SourceLocation::dump(const SourceManager &SM) const {
@@ -149,7 +149,7 @@ SourceRange::printToString(const SourceManager &SM) const {
std::string S;
llvm::raw_string_ostream OS(S);
print(OS, SM);
- return OS.str();
+ return S;
}
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index 4d403ae1809d..4089a393b762 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -307,6 +307,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (FPU & SveMode)
Builder.defineMacro("__ARM_FEATURE_SVE", "1");
+ if ((FPU & NeonMode) && (FPU & SveMode))
+ Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
+
if (HasSVE2)
Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
@@ -474,10 +477,12 @@ ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
Optional<std::pair<unsigned, unsigned>>
AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
if (LangOpts.VScaleMin || LangOpts.VScaleMax)
- return std::pair<unsigned, unsigned>(LangOpts.VScaleMin,
- LangOpts.VScaleMax);
+ return std::pair<unsigned, unsigned>(
+ LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
+
if (hasFeature("sve"))
- return std::pair<unsigned, unsigned>(0, 16);
+ return std::pair<unsigned, unsigned>(1, 16);
+
return None;
}
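
The fix above reflects that `vscale` is at least 1 by definition: an unset minimum now defaults to 1 rather than 0, and plain SVE reports the architectural range [1, 16]. The selection logic, restated as a standalone sketch:

    #include <utility>
    std::pair<unsigned, unsigned> vscaleRange(unsigned Min, unsigned Max,
                                              bool HasSVE) {
      if (Min || Max)
        return {Min ? Min : 1, Max}; // vscale is never 0
      if (HasSVE)
        return {1, 16};              // SVE: 128- to 2048-bit vectors
      return {0, 0};                 // stand-in for returning llvm::None
    }
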
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index dea59a9b015d..74745df3be8d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -150,7 +150,7 @@ public:
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
bool hasInt128Type() const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
index 8b9d7ce79c16..974922191488 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
@@ -426,7 +426,7 @@ public:
void setAuxTarget(const TargetInfo *Aux) override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
// Record offload arch features since they are needed for defining the
// pre-defined macros.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
index 3c0c5f6df2f4..5411cd2cd869 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
@@ -66,7 +66,7 @@ public:
return false;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool isCLZForZeroUndef() const override { return false; }
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index f330780300f2..c619d6cde41d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -465,6 +465,8 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HWDiv = 0;
DotProd = 0;
HasMatMul = 0;
+ HasPAC = 0;
+ HasBTI = 0;
HasFloat16 = true;
ARMCDECoprocMask = 0;
HasBFloat16 = false;
@@ -547,6 +549,9 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasBFloat16 = true;
} else if (Feature == "-fpregs") {
FPRegsDisabled = true;
+ } else if (Feature == "+pacbti") {
+ HasPAC = 1;
+ HasBTI = 1;
}
}
@@ -890,6 +895,12 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasMatMul)
Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+ if (HasPAC)
+ Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
+
+ if (HasBTI)
+ Builder.defineMacro("__ARM_FEATURE_BTI", "1");
+
if (HasBFloat16) {
Builder.defineMacro("__ARM_FEATURE_BF16", "1");
Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
@@ -900,7 +911,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
if (Opts.hasSignReturnAddress()) {
- unsigned Value = Opts.isSignReturnAddressWithAKey() ? 1 : 2;
+ unsigned Value = 1;
if (Opts.isSignReturnAddressScopeAll())
Value |= 1 << 2;
Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", Twine(Value));
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
index 7d0011d134ea..40c658f3f40e 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
@@ -79,6 +79,8 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned DotProd : 1;
unsigned HasMatMul : 1;
unsigned FPRegsDisabled : 1;
+ unsigned HasPAC : 1;
+ unsigned HasBTI : 1;
enum {
LDREX_B = (1 << 0), /// byte (8-bit)
@@ -191,8 +193,8 @@ public:
bool hasSjLjLowering() const override;
- bool hasExtIntType() const override { return true; }
-
+ bool hasBitIntType() const override { return true; }
+
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
index 9c37dee7e89a..161369242926 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
@@ -68,6 +68,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv68") {
Builder.defineMacro("__HEXAGON_V68__");
Builder.defineMacro("__HEXAGON_ARCH__", "68");
+ } else if (CPU == "hexagonv69") {
+ Builder.defineMacro("__HEXAGON_V69__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "69");
}
if (hasFeature("hvx-length64b")) {
@@ -128,6 +131,10 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
else if (F == "+audio")
HasAudio = true;
}
+ if (CPU.compare("hexagonv68") >= 0) {
+ HasLegalHalfType = true;
+ HasFloat16 = true;
+ }
return true;
}
@@ -214,7 +221,7 @@ static constexpr CPUSuffix Suffixes[] = {
{{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
{{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
{{"hexagonv67"}, {"67"}}, {{"hexagonv67t"}, {"67t"}},
- {{"hexagonv68"}, {"68"}},
+ {{"hexagonv68"}, {"68"}}, {{"hexagonv69"}, {"69"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
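
`CPU.compare("hexagonv68") >= 0` leans on lexicographic `std::string` ordering, which coincides with numeric ordering only while every CPU name is `hexagonv` plus exactly two digits. A quick check of the assumption:

    #include <cassert>
    #include <string>
    int main() {
      assert(std::string("hexagonv69").compare("hexagonv68") >= 0); // v69 >= v68
      assert(std::string("hexagonv67").compare("hexagonv68") < 0);  // v67 < v68
      // a hypothetical three-digit "hexagonv100" would sort before "hexagonv68"
    }
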
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
index d6c7da5f1e40..94441998f355 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
@@ -139,7 +139,7 @@ public:
return CPU.find('t') != std::string::npos;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
index 9af5427b81c4..56c6cced938a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
@@ -87,7 +87,7 @@ public:
const char *getClobbers() const override { return ""; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
index b475c03889a1..b54d36e1c95f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
@@ -406,7 +406,7 @@ public:
unsigned getUnwindWordWidth() const override;
bool validateTarget(DiagnosticsEngine &Diags) const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
index 3561b22677bc..75e82d819900 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
@@ -215,6 +215,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::Generic:
case CudaArch::LAST:
break;
case CudaArch::UNUSED:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
index ef751b8e1a8d..589f24f4bb03 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
@@ -121,7 +121,7 @@ public:
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
for (int i = static_cast<int>(CudaArch::SM_20);
- i < static_cast<int>(CudaArch::LAST); ++i)
+ i < static_cast<int>(CudaArch::Generic); ++i)
Values.emplace_back(CudaArchToString(static_cast<CudaArch>(i)));
}
@@ -175,7 +175,7 @@ public:
return CCCR_Warning;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
index 53748bf067cd..f8f12daaa072 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
@@ -48,12 +48,12 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("_REENTRANT");
// Get the platform type and version number from the triple.
- unsigned Maj, Min, Rev;
+ VersionTuple OsVersion;
if (Triple.isMacOSX()) {
- Triple.getMacOSXVersion(Maj, Min, Rev);
+ Triple.getMacOSXVersion(OsVersion);
PlatformName = "macos";
} else {
- Triple.getOSVersion(Maj, Min, Rev);
+ OsVersion = Triple.getOSVersion();
PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
if (PlatformName == "ios" && Triple.isMacCatalystEnvironment())
PlatformName = "maccatalyst";
@@ -63,29 +63,29 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
// generating code for Win32 ABI. No need to emit
// __ENVIRONMENT_XX_OS_VERSION_MIN_REQUIRED__.
if (PlatformName == "win32") {
- PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ PlatformMinVersion = OsVersion;
return;
}
// Set the appropriate OS version define.
if (Triple.isiOS()) {
- assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
+ assert(OsVersion < VersionTuple(100) && "Invalid version!");
char Str[7];
- if (Maj < 10) {
- Str[0] = '0' + Maj;
- Str[1] = '0' + (Min / 10);
- Str[2] = '0' + (Min % 10);
- Str[3] = '0' + (Rev / 10);
- Str[4] = '0' + (Rev % 10);
+ if (OsVersion.getMajor() < 10) {
+ Str[0] = '0' + OsVersion.getMajor();
+ Str[1] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
+ Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
+ Str[3] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
+ Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
Str[5] = '\0';
} else {
// Handle versions >= 10.
- Str[0] = '0' + (Maj / 10);
- Str[1] = '0' + (Maj % 10);
- Str[2] = '0' + (Min / 10);
- Str[3] = '0' + (Min % 10);
- Str[4] = '0' + (Rev / 10);
- Str[5] = '0' + (Rev % 10);
+ Str[0] = '0' + (OsVersion.getMajor() / 10);
+ Str[1] = '0' + (OsVersion.getMajor() % 10);
+ Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
+ Str[3] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
+ Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
+ Str[5] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
Str[6] = '\0';
}
if (Triple.isTvOS())
@@ -95,13 +95,13 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Str);
} else if (Triple.isWatchOS()) {
- assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
+ assert(OsVersion < VersionTuple(10) && "Invalid version!");
char Str[6];
- Str[0] = '0' + Maj;
- Str[1] = '0' + (Min / 10);
- Str[2] = '0' + (Min % 10);
- Str[3] = '0' + (Rev / 10);
- Str[4] = '0' + (Rev % 10);
+ Str[0] = '0' + OsVersion.getMajor();
+ Str[1] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
+ Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
+ Str[3] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
+ Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
Str[5] = '\0';
Builder.defineMacro("__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__", Str);
} else if (Triple.isMacOSX()) {
@@ -109,22 +109,22 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
// define (because we only get a single digit for the minor and micro
// revision numbers). So, we limit them to the maximum representable
// version.
- assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
+ assert(OsVersion < VersionTuple(100) && "Invalid version!");
char Str[7];
- if (Maj < 10 || (Maj == 10 && Min < 10)) {
- Str[0] = '0' + (Maj / 10);
- Str[1] = '0' + (Maj % 10);
- Str[2] = '0' + std::min(Min, 9U);
- Str[3] = '0' + std::min(Rev, 9U);
+ if (OsVersion < VersionTuple(10, 10)) {
+ Str[0] = '0' + (OsVersion.getMajor() / 10);
+ Str[1] = '0' + (OsVersion.getMajor() % 10);
+ Str[2] = '0' + std::min(OsVersion.getMinor().getValueOr(0), 9U);
+ Str[3] = '0' + std::min(OsVersion.getSubminor().getValueOr(0), 9U);
Str[4] = '\0';
} else {
// Handle versions > 10.9.
- Str[0] = '0' + (Maj / 10);
- Str[1] = '0' + (Maj % 10);
- Str[2] = '0' + (Min / 10);
- Str[3] = '0' + (Min % 10);
- Str[4] = '0' + (Rev / 10);
- Str[5] = '0' + (Rev % 10);
+ Str[0] = '0' + (OsVersion.getMajor() / 10);
+ Str[1] = '0' + (OsVersion.getMajor() % 10);
+ Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
+ Str[3] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
+ Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
+ Str[5] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
Str[6] = '\0';
}
Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
@@ -134,7 +134,7 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
if (Triple.isOSDarwin())
Builder.defineMacro("__MACH__");
- PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ PlatformMinVersion = OsVersion;
}
static void addMinGWDefines(const llvm::Triple &Triple, const LangOptions &Opts,
@@ -203,6 +203,7 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
}
Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+ Builder.defineMacro("__STDC_NO_THREADS__");
// Starting with VS 2022 17.1, MSVC predefines the below macro to inform
// users of the execution character set defined at compile time.
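
The Darwin, Android, and AIX hunks in this file (and the header changes below) replace parallel `Major/Minor/Micro` unsigneds with `llvm::VersionTuple`, whose ordered comparisons and optional components subsume the hand-rolled pair logic. The API surface used, in brief:

    llvm::VersionTuple V(10, 9);                   // 10.9, subminor absent
    bool Pre1010 = V < llvm::VersionTuple(10, 10); // true
    unsigned Sub = V.getSubminor().getValueOr(0);  // 0 when absent
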
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
index 7fbe2cbc5653..3c1830d5f8e8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
@@ -148,9 +148,7 @@ public:
return 64;
}
- unsigned Major, Minor, Micro;
- T.getOSVersion(Major, Minor, Micro);
- if (llvm::VersionTuple(Major, Minor, Micro) < MinVersion)
+ if (T.getOSVersion() < MinVersion)
return 64;
return OSTargetInfo<Target>::getExnObjectAlignment();
}
@@ -294,7 +292,7 @@ protected:
Builder.defineMacro("__HAIKU__");
Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
- if (this->HasFloat128)
+ if (this->HasFloat128)
Builder.defineMacro("__FLOAT128__");
}
@@ -376,10 +374,9 @@ protected:
Builder.defineMacro("__ELF__");
if (Triple.isAndroid()) {
Builder.defineMacro("__ANDROID__", "1");
- unsigned Maj, Min, Rev;
- Triple.getEnvironmentVersion(Maj, Min, Rev);
this->PlatformName = "android";
- this->PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ this->PlatformMinVersion = Triple.getEnvironmentVersion();
+ const unsigned Maj = this->PlatformMinVersion.getMajor();
if (Maj) {
Builder.defineMacro("__ANDROID_MIN_SDK_VERSION__", Twine(Maj));
// This is a historical but ambiguous name for the minSdkVersion macro. Keep
@@ -693,23 +690,32 @@ protected:
if (Opts.EnableAIXExtendedAltivecABI)
Builder.defineMacro("__EXTABI__");
- unsigned Major, Minor, Micro;
- Triple.getOSVersion(Major, Minor, Micro);
+ VersionTuple OsVersion = Triple.getOSVersion();
// Define AIX OS-Version Macros.
// Includes logic for legacy versions of AIX; no specific intent to support.
- std::pair<int, int> OsVersion = {Major, Minor};
- if (OsVersion >= std::make_pair(3, 2)) Builder.defineMacro("_AIX32");
- if (OsVersion >= std::make_pair(4, 1)) Builder.defineMacro("_AIX41");
- if (OsVersion >= std::make_pair(4, 3)) Builder.defineMacro("_AIX43");
- if (OsVersion >= std::make_pair(5, 0)) Builder.defineMacro("_AIX50");
- if (OsVersion >= std::make_pair(5, 1)) Builder.defineMacro("_AIX51");
- if (OsVersion >= std::make_pair(5, 2)) Builder.defineMacro("_AIX52");
- if (OsVersion >= std::make_pair(5, 3)) Builder.defineMacro("_AIX53");
- if (OsVersion >= std::make_pair(6, 1)) Builder.defineMacro("_AIX61");
- if (OsVersion >= std::make_pair(7, 1)) Builder.defineMacro("_AIX71");
- if (OsVersion >= std::make_pair(7, 2)) Builder.defineMacro("_AIX72");
- if (OsVersion >= std::make_pair(7, 3)) Builder.defineMacro("_AIX73");
+ if (OsVersion >= VersionTuple(3, 2))
+ Builder.defineMacro("_AIX32");
+ if (OsVersion >= VersionTuple(4, 1))
+ Builder.defineMacro("_AIX41");
+ if (OsVersion >= VersionTuple(4, 3))
+ Builder.defineMacro("_AIX43");
+ if (OsVersion >= VersionTuple(5, 0))
+ Builder.defineMacro("_AIX50");
+ if (OsVersion >= VersionTuple(5, 1))
+ Builder.defineMacro("_AIX51");
+ if (OsVersion >= VersionTuple(5, 2))
+ Builder.defineMacro("_AIX52");
+ if (OsVersion >= VersionTuple(5, 3))
+ Builder.defineMacro("_AIX53");
+ if (OsVersion >= VersionTuple(6, 1))
+ Builder.defineMacro("_AIX61");
+ if (OsVersion >= VersionTuple(7, 1))
+ Builder.defineMacro("_AIX71");
+ if (OsVersion >= VersionTuple(7, 2))
+ Builder.defineMacro("_AIX72");
+ if (OsVersion >= VersionTuple(7, 3))
+ Builder.defineMacro("_AIX73");
// FIXME: Do not define _LONG_LONG when -fno-long-long is specified.
Builder.defineMacro("_LONG_LONG");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
index d5bfc369583f..b5cf73d73e95 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
@@ -69,7 +69,7 @@ public:
const char *getClobbers() const override { return ""; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
// We attempt to use PNaCl (le32) frontend and Mips32EL backend.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index c3c61ed443ca..7f7b44b658eb 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -623,14 +623,11 @@ void PPCTargetInfo::addP10SpecificFeatures(
Features["pcrelative-memops"] = true;
Features["prefix-instrs"] = true;
Features["isa-v31-instructions"] = true;
- return;
}
// Add features specific to the "Future" CPU.
void PPCTargetInfo::addFutureSpecificFeatures(
- llvm::StringMap<bool> &Features) const {
- return;
-}
+ llvm::StringMap<bool> &Features) const {}
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index f19d3ebbc066..60701072ac4b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -351,7 +351,7 @@ public:
const char *getFloat128Mangling() const override { return "u9__ieee128"; }
const char *getIbm128Mangling() const override { return "g"; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool isSPRegName(StringRef RegName) const override {
return RegName.equals("r1") || RegName.equals("x1");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index 93562dde2f54..770d37a1c1be 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -246,6 +246,9 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ISAInfo = std::move(*ParseResult);
}
+ if (ABI.empty())
+ ABI = llvm::RISCV::computeDefaultABIFromArch(*ISAInfo).str();
+
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
index f7ffe9febcd0..5331ed4a50ae 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
@@ -95,7 +95,7 @@ public:
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
index 8cf18b6c20f1..a40d4b3ca27e 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
@@ -162,7 +162,7 @@ public:
supportAllOpenCLOpts();
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool hasInt128Type() const override { return false; }
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
index 22a1621fcb9f..177a117520da 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
@@ -48,8 +48,6 @@ public:
bool hasFeature(StringRef Feature) const override;
- bool hasSjLjLowering() const override { return true; }
-
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
// FIXME: Implement!
return None;
@@ -178,8 +176,7 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- bool hasSjLjLowering() const override { return true; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
// SPARCV8el is the 32-bit little-endian mode selected by Triple::sparcel.
@@ -232,7 +229,7 @@ public:
return getCPUGeneration(CPU) == CG_V9;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
index d3e3ed50dd47..92cefeea5d26 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
@@ -170,7 +170,7 @@ public:
const char *getLongDoubleMangling() const override { return "g"; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
int getEHDataRegisterNumber(unsigned RegNo) const override {
return RegNo < 4 ? 6 + RegNo : -1;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
index 16534d3ef99b..075486990558 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
@@ -137,7 +137,7 @@ private:
}
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool hasProtectedVisibility() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index b9b2ac79815b..c952b8c9a336 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -462,7 +462,7 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY NetBSDI386TargetInfo
@@ -472,10 +472,9 @@ public:
: NetBSDTargetInfo<X86_32TargetInfo>(Triple, Opts) {}
unsigned getFloatEvalMethod() const override {
- unsigned Major, Minor, Micro;
- getTriple().getOSVersion(Major, Minor, Micro);
+ VersionTuple OsVersion = getTriple().getOSVersion();
// New NetBSD uses the default rounding mode.
- if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 26) || Major == 0)
+ if (OsVersion >= VersionTuple(6, 99, 26) || OsVersion.getMajor() == 0)
return X86_32TargetInfo::getFloatEvalMethod();
// NetBSD before 6.99.26 defaults to "double" rounding.
return 1;
@@ -769,7 +768,7 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
// x86-64 Windows target
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
index c33766751aa1..25f20581839d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
@@ -76,7 +76,7 @@ public:
bool allowsLargerPreferedTypeAlignment() const override { return false; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Version.cpp b/contrib/llvm-project/clang/lib/Basic/Version.cpp
index af3118b0f6da..e205da7adec1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Version.cpp
@@ -82,7 +82,7 @@ std::string getClangFullRepositoryVersion() {
OS << LLVMRepo << ' ';
OS << LLVMRev << ')';
}
- return OS.str();
+ return buf;
}
std::string getClangFullVersion() {
@@ -102,7 +102,7 @@ std::string getClangToolFullVersion(StringRef ToolName) {
OS << " " << repo;
}
- return OS.str();
+ return buf;
}
std::string getClangFullCPPVersion() {
@@ -120,7 +120,7 @@ std::string getClangFullCPPVersion() {
OS << " " << repo;
}
- return OS.str();
+ return buf;
}
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index 56f0dd4322d2..0d12183055e1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -105,7 +105,7 @@ namespace swiftcall {
uint64_t &Members) const;
// Implement the Type::IsPromotableIntegerType for ABI specific needs. The
- // only difference is that this considers _ExtInt as well.
+ // only difference is that this considers bit-precise integer types as well.
bool isPromotableIntegerTypeForABI(QualType Ty) const;
/// A convenience method to return an indirect ABIArgInfo with an
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index 6a8e57f8db33..37c20291c0e8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -23,15 +23,29 @@ namespace CodeGen {
/// An aligned address.
class Address {
llvm::Value *Pointer;
+ llvm::Type *ElementType;
CharUnits Alignment;
+
+protected:
+ Address(std::nullptr_t) : Pointer(nullptr), ElementType(nullptr) {}
+
public:
- Address(llvm::Value *pointer, CharUnits alignment)
- : Pointer(pointer), Alignment(alignment) {
- assert((!alignment.isZero() || pointer == nullptr) &&
- "creating valid address with invalid alignment");
+ Address(llvm::Value *pointer, llvm::Type *elementType, CharUnits alignment)
+ : Pointer(pointer), ElementType(elementType), Alignment(alignment) {
+ assert(pointer != nullptr && "Pointer cannot be null");
+ assert(elementType != nullptr && "Element type cannot be null");
+ assert(llvm::cast<llvm::PointerType>(pointer->getType())
+ ->isOpaqueOrPointeeTypeMatches(elementType) &&
+ "Incorrect pointer element type");
+ assert(!alignment.isZero() && "Alignment cannot be zero");
}
- static Address invalid() { return Address(nullptr, CharUnits()); }
+ // Deprecated: Use constructor with explicit element type instead.
+ Address(llvm::Value *Pointer, CharUnits Alignment)
+ : Address(Pointer, Pointer->getType()->getPointerElementType(),
+ Alignment) {}
+
+ static Address invalid() { return Address(nullptr); }
bool isValid() const { return Pointer != nullptr; }
llvm::Value *getPointer() const {
@@ -45,11 +59,9 @@ public:
}
/// Return the type of the values stored in this address.
- ///
- /// When IR pointer types lose their element type, we should simply
- /// store it in Address instead for the convenience of writing code.
llvm::Type *getElementType() const {
- return getType()->getElementType();
+ assert(isValid());
+ return ElementType;
}
/// Return the address space that this address resides in.
@@ -67,30 +79,42 @@ public:
assert(isValid());
return Alignment;
}
+
+ /// Return address with different pointer, but same element type and
+ /// alignment.
+ Address withPointer(llvm::Value *NewPointer) const {
+ return Address(NewPointer, ElementType, Alignment);
+ }
+
+ /// Return address with different alignment, but same pointer and element
+ /// type.
+ Address withAlignment(CharUnits NewAlignment) const {
+ return Address(Pointer, ElementType, NewAlignment);
+ }
};
/// A specialization of Address that requires the address to be an
/// LLVM Constant.
class ConstantAddress : public Address {
+ ConstantAddress(std::nullptr_t) : Address(nullptr) {}
+
public:
- ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
- : Address(pointer, alignment) {}
+ ConstantAddress(llvm::Constant *pointer, llvm::Type *elementType,
+ CharUnits alignment)
+ : Address(pointer, elementType, alignment) {}
static ConstantAddress invalid() {
- return ConstantAddress(nullptr, CharUnits());
+ return ConstantAddress(nullptr);
}
llvm::Constant *getPointer() const {
return llvm::cast<llvm::Constant>(Address::getPointer());
}
- ConstantAddress getBitCast(llvm::Type *ty) const {
- return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
- getAlignment());
- }
-
- ConstantAddress getElementBitCast(llvm::Type *ty) const {
- return getBitCast(ty->getPointerTo(getAddressSpace()));
+ ConstantAddress getElementBitCast(llvm::Type *ElemTy) const {
+ llvm::Constant *BitCast = llvm::ConstantExpr::getBitCast(
+ getPointer(), ElemTy->getPointerTo(getAddressSpace()));
+ return ConstantAddress(BitCast, ElemTy, getAlignment());
}
static bool isaImpl(Address addr) {
@@ -98,7 +122,7 @@ public:
}
static ConstantAddress castImpl(Address addr) {
return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
- addr.getAlignment());
+ addr.getElementType(), addr.getAlignment());
}
};
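
This `Address` change is part of the opaque-pointer migration: the pointee type can no longer be recovered from `llvm::PointerType`, so it travels in the `Address` itself, with the old two-argument constructor kept as a deprecated bridge. Call-site shape under the new scheme (a fragment, not standalone; `Ptr`, `Elem`, and `OtherPtr` are placeholders for values a CodeGen context would supply):

    llvm::Value *Ptr = /* some pointer value */ nullptr;
    llvm::Type *Elem = /* its pointee type   */ nullptr;
    Address A(Ptr, Elem, CharUnits::One());  // element type now explicit
    Address B = A.withPointer(OtherPtr);     // same element type and alignment
    Address C = A.withAlignment(CharUnits::fromQuantity(8));
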
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index 510f3911939c..bacac0a20d4d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -94,10 +94,16 @@ using namespace llvm;
llvm::PassPluginLibraryInfo get##Ext##PluginInfo();
#include "llvm/Support/Extension.def"
+namespace llvm {
+extern cl::opt<bool> DebugInfoCorrelate;
+}
+
namespace {
// Default filename used for profile generation.
-static constexpr StringLiteral DefaultProfileGenName = "default_%m.profraw";
+std::string getDefaultProfileGenName() {
+ return DebugInfoCorrelate ? "default_%p.proflite" : "default_%m.profraw";
+}
class EmitAssemblyHelper {
DiagnosticsEngine &Diags;
@@ -597,8 +603,6 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
- Options.ValueTrackingVariableLocations =
- CodeGenOpts.ValueTrackingVariableLocations;
Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
Options.LoopAlignment = CodeGenOpts.LoopAlignment;
@@ -640,6 +644,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
+ Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
return true;
}
@@ -886,7 +891,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = std::string(DefaultProfileGenName);
+ PMBuilder.PGOInstrGen = getDefaultProfileGenName();
}
if (CodeGenOpts.hasProfileIRUse()) {
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
@@ -1231,7 +1236,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (CodeGenOpts.hasProfileIRInstr())
// -fprofile-generate.
PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput,
"", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
CodeGenOpts.DebugInfoForProfiling);
@@ -1269,13 +1274,13 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
"Cannot run CSProfileGen pass with ProfileGen or SampleUse "
" pass");
PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
PGOOpt = PGOOptions("",
CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput,
"", PGOOptions::NoAction, PGOOptions::CSIRInstr,
CodeGenOpts.DebugInfoForProfiling);
@@ -1577,7 +1582,8 @@ static void runThinLTOBackend(
return;
auto AddStream = [&](size_t Task) {
- return std::make_unique<CachedFileStream>(std::move(OS));
+ return std::make_unique<CachedFileStream>(std::move(OS),
+ CGOpts.ObjectFilenameForDebug);
};
lto::Config Conf;
if (CGOpts.SaveTempsFilePrefix != "") {
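
The constant default profile name becomes a function here because the right default now depends on a runtime flag: with debug-info correlation enabled, counters are written in the lightweight .proflite format keyed by process id (%p) instead of the .profraw format merged by module signature (%m). The extern-in-namespace-llvm declaration above is the usual way to reach a cl::opt defined in another LLVM library; a self-contained sketch of the same pattern:

    #include "llvm/Support/CommandLine.h"
    #include <string>

    namespace llvm {
    // Declared extern here; the definition lives in another LLVM library.
    extern cl::opt<bool> DebugInfoCorrelate;
    } // namespace llvm

    // Mirrors getDefaultProfileGenName(): the flag selects the format.
    static std::string pickDefaultProfileName() {
      return llvm::DebugInfoCorrelate ? "default_%p.proflite"
                                      : "default_%m.profraw";
    }
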
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index b68e6328acdf..e81c5ba5055c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -1079,8 +1079,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (AS == LangAS::opencl_generic)
return V;
auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
- auto T = V->getType();
- auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
+ auto T = llvm::cast<llvm::PointerType>(V->getType());
+ auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
return getTargetHooks().performAddrSpaceCast(
*this, V, AS, LangAS::opencl_generic, DestType, false);
@@ -1321,15 +1321,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
ResVal = Builder.CreateNot(ResVal);
Builder.CreateStore(
- ResVal,
- Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
+ ResVal, Builder.CreateElementBitCast(Dest, ResVal->getType()));
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
+ Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
@@ -1382,8 +1381,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
return RValue::get(nullptr);
return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
- Dest.getAddressSpace())),
+ Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
@@ -1455,17 +1453,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
- Dest.getAddressSpace())),
+ Builder.CreateElementBitCast(Dest, ConvertTypeForMem(RValTy)),
RValTy, E->getExprLoc());
}
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
- unsigned addrspace =
- cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
- return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
+ return CGF.Builder.CreateElementBitCast(addr, ty);
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
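
The CGAtomic.cpp call sites above all follow the same mechanical rewrite: instead of bitcasting to a hand-built T* in the matching address space (which requires asking the old pointer for its pointee type or address space), CreateElementBitCast takes the new element type directly and preserves the address space itself. In miniature, with a CGBuilderTy `Builder`, an Address `Addr`, and a target llvm::Type `Ty` assumed in scope:

    // Pre-patch pattern: spell out the pointer type by hand.
    Address Old =
        Builder.CreateBitCast(Addr, Ty->getPointerTo(Addr.getAddressSpace()));
    // Post-patch pattern: same result, element type recorded in the Address.
    Address New = Builder.CreateElementBitCast(Addr, Ty);
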
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 2da2014345d8..7bb6dbb8a8ac 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -2721,8 +2721,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
Address addr = emission.Addr;
// That's an alloca of the byref structure type.
- llvm::StructType *byrefType = cast<llvm::StructType>(
- cast<llvm::PointerType>(addr.getPointer()->getType())->getElementType());
+ llvm::StructType *byrefType = cast<llvm::StructType>(addr.getElementType());
unsigned nextHeaderIndex = 0;
CharUnits nextHeaderOffset;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
index 4fad44a105cd..7c9f41e84eaf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
@@ -86,7 +86,8 @@ public:
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
- assert(Addr->getType()->getPointerElementType() == Ty);
+ assert(llvm::cast<llvm::PointerType>(Addr->getType())
+ ->isOpaqueOrPointeeTypeMatches(Ty));
return CreateAlignedLoad(Ty, Addr, Align.getAsAlign(), Name);
}
@@ -115,13 +116,15 @@ public:
/// Emit a load from an i1 flag variable.
llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
const llvm::Twine &Name = "") {
- assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ assert(llvm::cast<llvm::PointerType>(Addr->getType())
+ ->isOpaqueOrPointeeTypeMatches(getInt1Ty()));
return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
}
/// Emit a store to an i1 flag variable.
llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
- assert(Addr->getType()->getPointerElementType() == getInt1Ty());
+ assert(llvm::cast<llvm::PointerType>(Addr->getType())
+ ->isOpaqueOrPointeeTypeMatches(getInt1Ty()));
return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
}
@@ -165,8 +168,9 @@ public:
/// preserving information like the alignment and address space.
Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
- auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
- return CreateBitCast(Addr, PtrTy, Name);
+ auto *PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
+ return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name),
+ Ty, Addr.getAlignment());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
@@ -194,6 +198,7 @@ public:
return Address(CreateStructGEP(Addr.getElementType(),
Addr.getPointer(), Index, Name),
+ ElTy->getElementType(Index),
Addr.getAlignment().alignmentAtOffset(Offset));
}
@@ -215,6 +220,7 @@ public:
return Address(
CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
+ ElTy->getElementType(),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
@@ -231,6 +237,7 @@ public:
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
+ ElTy,
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
@@ -247,15 +254,32 @@ public:
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
+ Addr.getElementType(),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
+ /// Create GEP with single dynamic index. The address alignment is reduced
+ /// according to the element size.
+ using CGBuilderBaseTy::CreateGEP;
+ Address CreateGEP(Address Addr, llvm::Value *Index,
+ const llvm::Twine &Name = "") {
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ CharUnits EltSize =
+ CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
+
+ return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(), Index,
+ Name),
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentOfArrayElement(EltSize));
+ }
+
/// Given a pointer to i8, adjust it by a given constant offset.
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
+ Addr.getElementType(),
Addr.getAlignment().alignmentAtOffset(Offset));
}
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
@@ -263,6 +287,7 @@ public:
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
+ Addr.getElementType(),
Addr.getAlignment().alignmentAtOffset(Offset));
}
@@ -278,8 +303,9 @@ public:
/*isSigned=*/true);
if (!GEP->accumulateConstantOffset(DL, Offset))
llvm_unreachable("offset of GEP with constants is always computable");
- return Address(GEP, Addr.getAlignment().alignmentAtOffset(
- CharUnits::fromQuantity(Offset.getSExtValue())));
+ return Address(GEP, GEP->getResultElementType(),
+ Addr.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(Offset.getSExtValue())));
}
using CGBuilderBaseTy::CreateMemCpy;
@@ -330,8 +356,14 @@ public:
return Address(CreatePreserveStructAccessIndex(ElTy, Addr.getPointer(),
Index, FieldIndex, DbgInfo),
+ ElTy->getElementType(Index),
Addr.getAlignment().alignmentAtOffset(Offset));
}
+
+ using CGBuilderBaseTy::CreateLaunderInvariantGroup;
+ Address CreateLaunderInvariantGroup(Address Addr) {
+ return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()));
+ }
};
} // end namespace CodeGen
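
Note the alignment rule in the new dynamic-index CreateGEP overload above: with a constant index the builder can use alignmentAtOffset, but an unknown index only guarantees alignment at element granularity, hence alignmentOfArrayElement. A worked instance of that rule (values illustrative):

    // A 16-byte-aligned base of 4-byte elements indexed by a runtime value
    // can only promise 4-byte alignment for the selected element.
    CharUnits BaseAlign = CharUnits::fromQuantity(16);
    CharUnits EltSize = CharUnits::fromQuantity(4);
    CharUnits EltAlign = BaseAlign.alignmentOfArrayElement(EltSize); // 4 bytes
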
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index 5d6df59cc405..1982b40ff667 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -96,13 +96,33 @@ llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
StringRef Name;
GlobalDecl D(FD);
+ // TODO: This list should be expanded or refactored after all GCC-compatible
+ // std libcall builtins are implemented.
+ static SmallDenseMap<unsigned, StringRef, 8> F128Builtins{
+ {Builtin::BI__builtin_printf, "__printfieee128"},
+ {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
+ {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
+ {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
+ {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
+ {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
+ {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
+ };
+
// If the builtin has been declared explicitly with an assembler label,
// use the mangled name. This differs from the plain label on platforms
// that prefix labels.
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
- else
- Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
+ else {
+ // TODO: This mutation should also be applied to targets other than PPC,
+ // once the backend supports IEEE 128-bit style libcalls.
+ if (getTriple().isPPC64() &&
+ &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
+ F128Builtins.find(BuiltinID) != F128Builtins.end())
+ Name = F128Builtins[BuiltinID];
+ else
+ Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
+ }
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
@@ -667,7 +687,7 @@ getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1
- : Type->isExtIntType() ? context.getIntWidth(Type)
+ : Type->isBitIntType() ? context.getIntWidth(Type)
: context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
@@ -1482,8 +1502,7 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = ArgValue->getType();
- llvm::Type *IndexType =
- IndexAddress.getPointer()->getType()->getPointerElementType();
+ llvm::Type *IndexType = IndexAddress.getElementType();
llvm::Type *ResultType = ConvertType(E->getType());
Value *ArgZero = llvm::Constant::getNullValue(ArgType);
@@ -3113,6 +3132,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
"elt.abs");
return RValue::get(Result);
}
+
+ case Builtin::BI__builtin_elementwise_ceil: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::ceil, Op0,
+ nullptr, "elt.ceil");
+ return RValue::get(Result);
+ }
+
case Builtin::BI__builtin_elementwise_max: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
@@ -3184,6 +3211,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_reduce_xor: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreateUnaryIntrinsic(
+ llvm::Intrinsic::vector_reduce_xor, Op0, nullptr, "rdx.xor");
+ return RValue::get(Result);
+ }
+
case Builtin::BI__builtin_matrix_transpose: {
const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
@@ -4478,6 +4512,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
+ case Builtin::BI__builtin_function_start:
+ return RValue::get(CGM.GetFunctionStart(
+ E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
@@ -4674,8 +4711,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
case Builtin::BI__builtin_coro_suspend:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
- case Builtin::BI__builtin_coro_param:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
@@ -5221,9 +5256,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *BPP = Int8PtrPtrTy;
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
- DestAddr.getAlignment());
+ Int8PtrTy, DestAddr.getAlignment());
SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
- SrcAddr.getAlignment());
+ Int8PtrTy, SrcAddr.getAlignment());
Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
@@ -6385,6 +6420,7 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sve_builtin_cg.inc"
+#include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};
@@ -9308,6 +9344,54 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
return Builder.CreateCall(F, {V0, V1, Ops[1]});
}
+
+ case SVE::BI__builtin_sve_svset_neonq_s8:
+ case SVE::BI__builtin_sve_svset_neonq_s16:
+ case SVE::BI__builtin_sve_svset_neonq_s32:
+ case SVE::BI__builtin_sve_svset_neonq_s64:
+ case SVE::BI__builtin_sve_svset_neonq_u8:
+ case SVE::BI__builtin_sve_svset_neonq_u16:
+ case SVE::BI__builtin_sve_svset_neonq_u32:
+ case SVE::BI__builtin_sve_svset_neonq_u64:
+ case SVE::BI__builtin_sve_svset_neonq_f16:
+ case SVE::BI__builtin_sve_svset_neonq_f32:
+ case SVE::BI__builtin_sve_svset_neonq_f64:
+ case SVE::BI__builtin_sve_svset_neonq_bf16: {
+ return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
+ }
+
+ case SVE::BI__builtin_sve_svget_neonq_s8:
+ case SVE::BI__builtin_sve_svget_neonq_s16:
+ case SVE::BI__builtin_sve_svget_neonq_s32:
+ case SVE::BI__builtin_sve_svget_neonq_s64:
+ case SVE::BI__builtin_sve_svget_neonq_u8:
+ case SVE::BI__builtin_sve_svget_neonq_u16:
+ case SVE::BI__builtin_sve_svget_neonq_u32:
+ case SVE::BI__builtin_sve_svget_neonq_u64:
+ case SVE::BI__builtin_sve_svget_neonq_f16:
+ case SVE::BI__builtin_sve_svget_neonq_f32:
+ case SVE::BI__builtin_sve_svget_neonq_f64:
+ case SVE::BI__builtin_sve_svget_neonq_bf16: {
+ return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
+ }
+
+ case SVE::BI__builtin_sve_svdup_neonq_s8:
+ case SVE::BI__builtin_sve_svdup_neonq_s16:
+ case SVE::BI__builtin_sve_svdup_neonq_s32:
+ case SVE::BI__builtin_sve_svdup_neonq_s64:
+ case SVE::BI__builtin_sve_svdup_neonq_u8:
+ case SVE::BI__builtin_sve_svdup_neonq_u16:
+ case SVE::BI__builtin_sve_svdup_neonq_u32:
+ case SVE::BI__builtin_sve_svdup_neonq_u64:
+ case SVE::BI__builtin_sve_svdup_neonq_f16:
+ case SVE::BI__builtin_sve_svdup_neonq_f32:
+ case SVE::BI__builtin_sve_svdup_neonq_f64:
+ case SVE::BI__builtin_sve_svdup_neonq_bf16: {
+ Value *Insert = Builder.CreateInsertVector(Ty, UndefValue::get(Ty), Ops[0],
+ Builder.getInt64(0));
+ return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
+ {Insert, Builder.getInt64(0)});
+ }
}
/// Should not happen
@@ -15331,7 +15415,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// If the user wants the entire vector, just load the entire vector.
if (NumBytes == 16) {
Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
- Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
+ Value *LD =
+ Builder.CreateLoad(Address(BC, ResTy, CharUnits::fromQuantity(1)));
if (!IsLE)
return LD;
@@ -15392,8 +15477,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
RevMask.push_back(15 - Idx);
StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
}
- return Builder.CreateStore(StVec,
- Address(BC, CharUnits::fromQuantity(1)));
+ return Builder.CreateStore(
+ StVec, Address(BC, Ops[2]->getType(), CharUnits::fromQuantity(1)));
}
auto *ConvTy = Int64Ty;
unsigned NumElts = 0;
@@ -15427,8 +15512,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
Elt = Builder.CreateCall(F, Elt);
}
- return Builder.CreateStore(Elt,
- Address(PtrBC, CharUnits::fromQuantity(1)));
+ return Builder.CreateStore(
+ Elt, Address(PtrBC, ConvTy, CharUnits::fromQuantity(1)));
};
unsigned Stored = 0;
unsigned RemainingBytes = NumBytes;
@@ -16222,7 +16307,8 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
auto *DstTy =
CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
- auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
+ auto *LD = CGF.Builder.CreateLoad(
+ Address(Cast, CGF.Int16Ty, CharUnits::fromQuantity(2)));
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
@@ -16242,7 +16328,8 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
auto *DstTy =
CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
- auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
+ auto *LD = CGF.Builder.CreateLoad(
+ Address(Cast, CGF.Int32Ty, CharUnits::fromQuantity(4)));
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
llvm::MDNode::get(CGF.getLLVMContext(), None));
return LD;
@@ -16314,8 +16401,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
- llvm::Type *RealFlagType
- = FlagOutPtr.getPointer()->getType()->getPointerElementType();
+ llvm::Type *RealFlagType = FlagOutPtr.getElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
Builder.CreateStore(FlagExt, FlagOutPtr);
@@ -16572,6 +16658,15 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
+ // The builtins take these arguments as vec4 where the last element is
+ // ignored. The intrinsic takes them as vec3.
+ RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
+ ArrayRef<int>{0, 1, 2});
+ RayDir =
+ Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
+ RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
+ ArrayRef<int>{0, 1, 2});
+
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
{NodePtr->getType(), RayDir->getType()});
return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
@@ -17938,7 +18033,7 @@ RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
if (getLangOpts().isSignedOverflowDefined())
Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
else
- Result = EmitCheckedInBoundsGEP(Base, Difference,
+ Result = EmitCheckedInBoundsGEP(Int8Ty, Base, Difference,
/*SignedIndices=*/true,
/*isSubtraction=*/!AlignUp,
E->getExprLoc(), "aligned_result");
@@ -18501,6 +18596,7 @@ getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
+ // Legacy builtins that take a vector in place of a vector predicate.
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
@@ -18534,8 +18630,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
Address A = EmitPointerWithAlignment(E->getArg(0));
- Address BP = Address(
- Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
+ Address BP = Address(Builder.CreateBitCast(
+ A.getPointer(), Int8PtrPtrTy), Int8PtrTy, A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// The treatment of both loads and stores is the same: the arguments for
// the builtin are the same as the arguments for the intrinsic.
@@ -18579,7 +18675,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
- DestAddr.getAlignment());
+ Int8Ty, DestAddr.getAlignment());
llvm::Value *DestAddress = DestAddr.getPointer();
// Operands are Base, Dest, Modifier.
@@ -18626,8 +18722,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
// Get the type from the 0-th argument.
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
- Address PredAddr = Builder.CreateBitCast(
- EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
+ Address PredAddr = Builder.CreateElementBitCast(
+ EmitPointerWithAlignment(E->getArg(2)), VecType);
llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
@@ -18638,6 +18734,27 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractValue(Result, 0);
}
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
+ SmallVector<llvm::Value*,4> Ops;
+ const Expr *PredOp = E->getArg(0);
+ // There will be an implicit cast to a boolean vector. Strip it.
+ if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
+ if (Cast->getCastKind() == CK_BitCast)
+ PredOp = Cast->getSubExpr();
+ Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
+ }
+ for (int i = 1, e = E->getNumArgs(); i != e; ++i)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
+
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
@@ -18674,40 +18791,6 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
-
- default: {
- if (ID == Intrinsic::not_intrinsic)
- return nullptr;
-
- auto IsVectorPredTy = [](llvm::Type *T) {
- return T->isVectorTy() &&
- cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
- };
-
- llvm::Function *IntrFn = CGM.getIntrinsic(ID);
- llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
- SmallVector<llvm::Value*,4> Ops;
- for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
- llvm::Type *T = IntrTy->getParamType(i);
- const Expr *A = E->getArg(i);
- if (IsVectorPredTy(T)) {
- // There will be an implicit cast to a boolean vector. Strip it.
- if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
- if (Cast->getCastKind() == CK_BitCast)
- A = Cast->getSubExpr();
- }
- Ops.push_back(V2Q(EmitScalarExpr(A)));
- } else {
- Ops.push_back(EmitScalarExpr(A));
- }
- }
-
- llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
- if (IsVectorPredTy(IntrTy->getReturnType()))
- Call = Q2V(Call);
-
- return Call;
- } // default
} // switch
return nullptr;
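
Among the CGBuiltin.cpp additions above are two new portable vector builtins: __builtin_elementwise_ceil (per-lane llvm.ceil) and __builtin_reduce_xor (llvm.vector.reduce.xor). A usage sketch with clang's vector extension, illustrative only:

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef int int4 __attribute__((ext_vector_type(4)));

    float4 round_up(float4 v) {
      return __builtin_elementwise_ceil(v); // one llvm.ceil per lane
    }
    int xor_reduce(int4 v) {
      return __builtin_reduce_xor(v); // folds all lanes with xor
    }
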
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
index a1b4431ca8c4..c4e3f7f54f4f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
@@ -814,6 +814,9 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
Linkage,
/*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
"__hip_gpubin_handle");
+ if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
+ GpuBinaryHandle->setComdat(
+ CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
// Prevent the weak symbol in different shared libraries being merged.
if (Linkage != llvm::GlobalValue::InternalLinkage)
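
The comdat fix above deduplicates the HIP fat-binary handle: a linkonce global that is not in a comdat group is not guaranteed to be merged across translation units on every object format, so each TU could otherwise register its own handle. Stripped to its essentials, with `M` an llvm::Module and `VoidPtrPtrTy` the handle's pointer type, both assumed:

    auto *GV = new llvm::GlobalVariable(
        M, VoidPtrPtrTy, /*isConstant=*/false,
        llvm::GlobalValue::LinkOnceAnyLinkage,
        llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__hip_gpubin_handle");
    // A comdat keyed by the global's own name lets the linker keep one copy.
    GV->setComdat(M.getOrInsertComdat(GV->getName()));
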
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index d830a7e01709..d70f78fea6b4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -1261,8 +1261,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
//
// FIXME: Assert that we aren't truncating non-padding bits when we have access
// to that information.
- Src = CGF.Builder.CreateBitCast(Src,
- Ty->getPointerTo(Src.getAddressSpace()));
+ Src = CGF.Builder.CreateElementBitCast(Src, Ty);
return CGF.Builder.CreateLoad(Src);
}
@@ -1832,11 +1831,6 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
FuncAttrs.addAttribute("no-trapping-math", "true");
- // Strict (compliant) code is the default, so only add this attribute to
- // indicate that we are trying to workaround a problem case.
- if (!CodeGenOpts.StrictFloatCastOverflow)
- FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
-
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
if (LangOpts.NoHonorInfs)
@@ -1971,7 +1965,7 @@ static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
// there's no internal padding (typeSizeEqualsStoreSize).
return false;
}
- if (QTy->isExtIntType())
+ if (QTy->isBitIntType())
return true;
if (QTy->isReferenceType())
return true;
@@ -2686,8 +2680,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- Address ParamAddr =
- Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
+ Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
+ ArgI.getIndirectAlign());
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -3475,12 +3469,19 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
case TEK_Aggregate:
// Do nothing; aggregates get evaluated directly into the destination.
break;
- case TEK_Scalar:
- EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
- MakeNaturalAlignAddrLValue(&*AI, RetTy),
- /*isInit*/ true);
+ case TEK_Scalar: {
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+ CharUnits Alignment =
+ CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
+ Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
+ LValue ArgVal =
+ LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
+ EmitStoreOfScalar(
+ Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
break;
}
+ }
break;
}
@@ -4134,8 +4135,7 @@ void CodeGenFunction::EmitCallArgs(
}
// If we still have any arguments, emit them using the type of the argument.
- for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
- ArgRange.end()))
+ for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
@@ -4308,11 +4308,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
- AggValueSlot Slot;
- if (args.isUsingInAlloca())
- Slot = createPlaceholderSlot(*this, type);
- else
- Slot = CreateAggTemp(type, "agg.tmp");
+ AggValueSlot Slot = args.isUsingInAlloca()
+ ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
bool DestroyedInCallee = true, NeedsEHCleanup = true;
if (const auto *RD = type->getAsCXXRecordDecl())
@@ -4651,13 +4648,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
//
// In other cases, we assert that the types match up (until pointers stop
// having pointee types).
- llvm::Type *TypeFromVal;
if (Callee.isVirtual())
- TypeFromVal = Callee.getVirtualFunctionType();
- else
- TypeFromVal =
- Callee.getFunctionPointer()->getType()->getPointerElementType();
- assert(IRFuncTy == TypeFromVal);
+ assert(IRFuncTy == Callee.getVirtualFunctionType());
+ else {
+ llvm::PointerType *PtrTy =
+ llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType());
+ assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy));
+ }
}
#endif
@@ -4872,7 +4869,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
- auto *T = V->getType()->getPointerElementType()->getPointerTo(
+ auto *T = llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(V->getType()),
CGM.getDataLayout().getAllocaAddrSpace());
IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
*this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
@@ -4967,8 +4965,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
Src = TempAlloca;
} else {
- Src = Builder.CreateBitCast(Src,
- STy->getPointerTo(Src.getAddressSpace()));
+ Src = Builder.CreateElementBitCast(Src, STy);
}
assert(NumIRArgs == STy->getNumElements());
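
One incidental cleanup in CGCall.cpp above: llvm::drop_begin(Range, N) replaces the make_range(std::next(...), end) spelling for skipping the first N elements. The two forms are equivalent; in a standalone setting:

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    void visitTail(const std::vector<int> &Xs) {
      // Iterates Xs[2], Xs[3], ... exactly like the make_range/std::next form.
      for (int X : llvm::drop_begin(Xs, 2))
        (void)X;
    }
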
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index e3d9fec6d363..c8594068c3fc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -115,7 +115,8 @@ public:
AbstractInfo = abstractInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
- assert(functionPtr->getType()->getPointerElementType()->isFunctionTy());
+ assert(functionPtr->getType()->isOpaquePointerTy() ||
+ functionPtr->getType()->getPointerElementType()->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 0df64d4d5d26..8f99ff0d50ff 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -127,18 +127,18 @@ CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
Address CodeGenFunction::LoadCXXThisAddress() {
assert(CurFuncDecl && "loading 'this' without a func declaration?");
- assert(isa<CXXMethodDecl>(CurFuncDecl));
+ auto *MD = cast<CXXMethodDecl>(CurFuncDecl);
// Lazily compute CXXThisAlignment.
if (CXXThisAlignment.isZero()) {
// Just use the best known alignment for the parent.
// TODO: if we're currently emitting a complete-object ctor/dtor,
// we can always use the complete-object alignment.
- auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
- CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+ CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent());
}
- return Address(LoadCXXThis(), CXXThisAlignment);
+ llvm::Type *Ty = ConvertType(MD->getThisType()->getPointeeType());
+ return Address(LoadCXXThis(), Ty, CXXThisAlignment);
}
/// Emit the address of a field using a member data pointer.
@@ -286,7 +286,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
}
alignment = alignment.alignmentAtOffset(nonVirtualOffset);
- return Address(ptr, alignment);
+ return Address(ptr, CGF.Int8Ty, alignment);
}
Address CodeGenFunction::GetAddressOfBaseClass(
@@ -326,9 +326,9 @@ Address CodeGenFunction::GetAddressOfBaseClass(
}
// Get the base pointer type.
+ llvm::Type *BaseValueTy = ConvertType((PathEnd[-1])->getType());
llvm::Type *BasePtrTy =
- ConvertType((PathEnd[-1])->getType())
- ->getPointerTo(Value.getType()->getPointerAddressSpace());
+ BaseValueTy->getPointerTo(Value.getType()->getPointerAddressSpace());
QualType DerivedTy = getContext().getRecordType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
@@ -342,7 +342,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
DerivedTy, DerivedAlign, SkippedChecks);
}
- return Builder.CreateBitCast(Value, BasePtrTy);
+ return Builder.CreateElementBitCast(Value, BaseValueTy);
}
llvm::BasicBlock *origBB = nullptr;
@@ -379,7 +379,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
VirtualOffset, Derived, VBase);
// Cast to the destination type.
- Value = Builder.CreateBitCast(Value, BasePtrTy);
+ Value = Builder.CreateElementBitCast(Value, BaseValueTy);
// Build a phi if we needed a null check.
if (NullCheckValue) {
@@ -406,16 +406,16 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
QualType DerivedTy =
getContext().getCanonicalType(getContext().getTagDeclType(Derived));
- unsigned AddrSpace =
- BaseAddr.getPointer()->getType()->getPointerAddressSpace();
- llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(AddrSpace);
+ unsigned AddrSpace = BaseAddr.getAddressSpace();
+ llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
+ llvm::Type *DerivedPtrTy = DerivedValueTy->getPointerTo(AddrSpace);
llvm::Value *NonVirtualOffset =
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
if (!NonVirtualOffset) {
// No offset, we can just cast back.
- return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
+ return Builder.CreateElementBitCast(BaseAddr, DerivedValueTy);
}
llvm::BasicBlock *CastNull = nullptr;
@@ -453,7 +453,7 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
Value = PHI;
}
- return Address(Value, CGM.getClassPointerAlignment(Derived));
+ return Address(Value, DerivedValueTy, CGM.getClassPointerAlignment(Derived));
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -996,16 +996,8 @@ namespace {
private:
void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
- llvm::PointerType *DPT = DestPtr.getType();
- llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
-
- llvm::PointerType *SPT = SrcPtr.getType();
- llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
-
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
+ SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
}
@@ -2068,8 +2060,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
- llvm::Type *NewType =
- ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS);
+ llvm::Type *NewType = llvm::PointerType::getWithSamePointeeType(
+ This.getType(), TargetThisAS);
ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
ThisAS, SlotAS, NewType);
}
@@ -2507,9 +2499,6 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Apply the offsets.
Address VTableField = LoadCXXThisAddress();
- unsigned ThisAddrSpace =
- VTableField.getPointer()->getType()->getPointerAddressSpace();
-
if (!NonVirtualOffset.isZero() || VirtualOffset)
VTableField = ApplyNonVirtualAndVirtualOffset(
*this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass,
@@ -2525,8 +2514,7 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
->getPointerTo(GlobalsAS);
// vtable field is derived from `this` pointer, therefore they should be in
// the same addr space. Note that this might not be LLVM address space 0.
- VTableField = Builder.CreateBitCast(VTableField,
- VTablePtrTy->getPointerTo(ThisAddrSpace));
+ VTableField = Builder.CreateElementBitCast(VTableField, VTablePtrTy);
VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
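
The CGClass.cpp hunks are more of the same typed-Address plumbing, with two details worth noting: byte-offset adjustments (ApplyNonVirtualAndVirtualOffset) now yield an i8-typed Address, since the pointer was advanced with byte GEPs, and the base/derived casts keep the value type (BaseValueTy, DerivedValueTy) rather than re-deriving it from a pointer type. In source terms, the conversions these helpers implement are the ordinary ones (sketch):

    struct Base { int b; };
    struct Derived : Base { int d; };

    // GetAddressOfBaseClass: may fold in a constant or virtual-base offset.
    Base *asBase(Derived *D) { return D; }
    // GetAddressOfDerivedClass: the inverse adjustment, as in a static_cast.
    Derived *asDerived(Base *B) { return static_cast<Derived *>(B); }
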
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
index 1b54c0018d27..76f3a48f32f3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
@@ -242,7 +242,7 @@ class alignas(8) EHCleanupScope : public EHScope {
/// An optional i1 variable indicating whether this cleanup has been
/// activated yet.
- llvm::AllocaInst *ActiveFlag;
+ Address ActiveFlag;
/// Extra information required for cleanups that have resolved
/// branches through them. This has to be allocated on the side
@@ -290,7 +290,8 @@ public:
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
- ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) {
+ ActiveFlag(Address::invalid()), ExtInfo(nullptr),
+ FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
CleanupBits.IsActive = true;
@@ -320,13 +321,13 @@ public:
bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
- bool hasActiveFlag() const { return ActiveFlag != nullptr; }
+ bool hasActiveFlag() const { return ActiveFlag.isValid(); }
Address getActiveFlag() const {
- return Address(ActiveFlag, CharUnits::One());
+ return ActiveFlag;
}
void setActiveFlag(Address Var) {
assert(Var.getAlignment().isOne());
- ActiveFlag = cast<llvm::AllocaInst>(Var.getPointer());
+ ActiveFlag = Var;
}
void setTestFlagInNormalCleanup() {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index ca071d3d2e80..2041d2a5b4c9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -597,6 +597,10 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi});
CurCoro.Data->CoroBegin = CoroBegin;
+ // We need to emit `get_return_object` first. According to:
+ // [dcl.fct.def.coroutine]p7
+ // The call to get_return_object is sequenced before the call to
+ // initial_suspend and is invoked at most once.
GetReturnObjectManager GroManager(*this, S);
GroManager.EmitGroAlloca();
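
The new comment above pins down an ordering guarantee from the standard. A compilable illustration, where `task` is a minimal stand-in type, not anything from the patch:

    #include <coroutine>
    #include <cstdio>

    struct task {
      struct promise_type {
        task get_return_object() { std::puts("get_return_object"); return {}; }
        std::suspend_never initial_suspend() {
          std::puts("initial_suspend");
          return {};
        }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() {}
      };
    };

    task coro() { co_return; }
    // Calling coro() prints get_return_object before initial_suspend, the
    // sequencing [dcl.fct.def.coroutine]p7 requires and this change emits.
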
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index af651e6f44b7..6e189a61dd20 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -768,7 +768,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
// Element count = (VLENB / SEW) x LMUL
- SmallVector<int64_t, 9> Expr(
+ SmallVector<int64_t, 12> Expr(
// The DW_OP_bregx operation has two operands: a register which is
// specified by an unsigned LEB128 number, followed by a signed LEB128
// offset.
@@ -782,6 +782,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
Expr.push_back(llvm::dwarf::DW_OP_div);
else
Expr.push_back(llvm::dwarf::DW_OP_mul);
+ // Element max index = count - 1
+ Expr.append({llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
auto *LowerBound =
llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
@@ -884,9 +886,9 @@ llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) {
return DBuilder.createUnspecifiedType("auto");
}
-llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) {
+llvm::DIType *CGDebugInfo::CreateType(const BitIntType *Ty) {
- StringRef Name = Ty->isUnsigned() ? "unsigned _ExtInt" : "_ExtInt";
+ StringRef Name = Ty->isUnsigned() ? "unsigned _BitInt" : "_BitInt";
llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
? llvm::dwarf::DW_ATE_unsigned
: llvm::dwarf::DW_ATE_signed;
@@ -3353,6 +3355,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
+ case Type::Using:
+ T = cast<UsingType>(T)->getUnderlyingType();
+ break;
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
break;
@@ -3531,8 +3536,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
case Type::Atomic:
return CreateType(cast<AtomicType>(Ty), Unit);
- case Type::ExtInt:
- return CreateType(cast<ExtIntType>(Ty));
+ case Type::BitInt:
+ return CreateType(cast<BitIntType>(Ty));
case Type::Pipe:
return CreateType(cast<PipeType>(Ty), Unit);
@@ -3545,6 +3550,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
case Type::Elaborated:
+ case Type::Using:
case Type::Paren:
case Type::MacroQualified:
case Type::SubstTemplateTypeParm:
@@ -3633,6 +3639,9 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// Record exports its symbols to the containing structure.
if (CXXRD->isAnonymousStructOrUnion())
Flags |= llvm::DINode::FlagExportSymbols;
+
+ Flags |= getAccessFlag(CXXRD->getAccess(),
+ dyn_cast<CXXRecordDecl>(CXXRD->getDeclContext()));
}
llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
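
The _ExtInt to _BitInt renames in this file (and in CGDebugInfo.h below) track C23's adoption of clang's arbitrary-width integers under the standard spelling, and the debug-info base-type names change with them. For instance (clang accepts _BitInt in C23 and, as an extension, in C++):

    _BitInt(12) narrow;          // debug name "_BitInt", DW_ATE_signed
    unsigned _BitInt(7) tiny;    // debug name "unsigned _BitInt", DW_ATE_unsigned
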
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index a7b72fa5f5a6..14ff0eeabd21 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -177,7 +177,7 @@ class CGDebugInfo {
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
llvm::DIType *CreateType(const AutoType *Ty);
- llvm::DIType *CreateType(const ExtIntType *Ty);
+ llvm::DIType *CreateType(const BitIntType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg,
TypeLoc TL = TypeLoc());
llvm::DIType *CreateQualifiedType(const FunctionProtoType *Ty,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index 941671c61482..e09279c1d455 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -405,7 +405,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- setAddrOfLocalVar(&D, Address(addr, alignment));
+ llvm::Type *elemTy = ConvertTypeForMem(D.getType());
+ setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -458,8 +459,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// RAUW's the GV uses of this constant will be invalid.
llvm::Constant *castedAddr =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
- if (var != castedAddr)
- LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
+ LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
CGM.setStaticLocalDeclAddress(&D, castedAddr);
CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
@@ -1146,7 +1146,7 @@ Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
CacheEntry->setAlignment(Align.getAsAlign());
}
- return Address(CacheEntry, Align);
+ return Address(CacheEntry, CacheEntry->getValueType(), Align);
}
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
@@ -1193,7 +1193,7 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
bool valueAlreadyCorrect =
constant->isNullValue() || isa<llvm::UndefValue>(constant);
if (!valueAlreadyCorrect) {
- Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
+ Loc = Builder.CreateElementBitCast(Loc, Ty);
emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
IsAutoInit);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index d22f9dc3b68c..3579761f1429 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -172,7 +172,7 @@ void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
- llvm::Constant *DeclPtr,
+ llvm::GlobalVariable *GV,
bool PerformInit) {
const Expr *Init = D.getInit();
@@ -194,14 +194,16 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
// "shared" address space qualifier, but the constructor of StructWithCtor
// expects "this" in the "generic" address space.
unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
- unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
+ unsigned ActualAddrSpace = GV->getAddressSpace();
+ llvm::Constant *DeclPtr = GV;
if (ActualAddrSpace != ExpectedAddrSpace) {
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
- llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
+ llvm::PointerType *PTy = llvm::PointerType::getWithSamePointeeType(
+ GV->getType(), ExpectedAddrSpace);
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
- ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
+ ConstantAddress DeclAddr(
+ DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));
if (!T->isReferenceType()) {
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
index aff9c77d53c7..91ecbecc843f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
@@ -400,8 +400,8 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
- Address typedAddr = Builder.CreateBitCast(addr, ty);
+ llvm::Type *ty = ConvertTypeForMem(e->getType());
+ Address typedAddr = Builder.CreateElementBitCast(addr, ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -421,13 +421,13 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
Address CodeGenFunction::getExceptionSlot() {
if (!ExceptionSlot)
ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
- return Address(ExceptionSlot, getPointerAlign());
+ return Address(ExceptionSlot, Int8PtrTy, getPointerAlign());
}
Address CodeGenFunction::getEHSelectorSlot() {
if (!EHSelectorSlot)
EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
- return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
+ return Address(EHSelectorSlot, Int32Ty, CharUnits::fromQuantity(4));
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index 4332e74dbb24..34b4951a7f72 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -71,7 +71,7 @@ Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getAsAlign());
- return Address(Alloca, Align);
+ return Address(Alloca, Ty, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -101,7 +101,7 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
- return Address(V, Align);
+ return Address(V, Ty, Align);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -144,7 +144,7 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
/*ArraySize=*/nullptr, Alloca);
if (Ty->isConstantMatrixType()) {
- auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
+ auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
@@ -1099,7 +1099,7 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
- Addr = Address(Addr.getPointer(), Align);
+ Addr = Address(Addr.getPointer(), Addr.getElementType(), Align);
}
}
@@ -1111,10 +1111,12 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
CodeGenFunction::CFITCK_UnrelatedCast,
CE->getBeginLoc());
}
- return CE->getCastKind() != CK_AddressSpaceConversion
- ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
- : Builder.CreateAddrSpaceCast(Addr,
- ConvertType(E->getType()));
+
+ if (CE->getCastKind() == CK_AddressSpaceConversion)
+ return Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
+
+ llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
+ return Builder.CreateElementBitCast(Addr, ElemTy);
}
break;
@@ -1160,7 +1162,8 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Otherwise, use the alignment of the type.
CharUnits Align =
CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
- return Address(EmitScalarExpr(E), Align);
+ llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
+ return Address(EmitScalarExpr(E), ElemTy, Align);
}
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
@@ -1306,7 +1309,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
const ConstantExpr *CE = cast<ConstantExpr>(E);
if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
- ->getCallReturnType(getContext());
+ ->getCallReturnType(getContext())
+ ->getPointeeType();
return MakeNaturalAlignAddrLValue(Result, RetType);
}
return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
@@ -1342,10 +1346,11 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
- llvm::Value *V = LV.getPointer(*this);
+ Address Addr = LV.getAddress(*this);
+ llvm::Value *V = Addr.getPointer();
Scope.ForceCleanup({&V});
- return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
- getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
+ return LValue::MakeAddr(Addr.withPointer(V), LV.getType(), getContext(),
+ LV.getBaseInfo(), LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
@@ -1777,16 +1782,14 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// MatrixType), if it points to a array (the memory type of MatrixType).
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
bool IsVector = true) {
- auto *ArrayTy = dyn_cast<llvm::ArrayType>(
- cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
if (ArrayTy && IsVector) {
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
}
- auto *VectorTy = dyn_cast<llvm::VectorType>(
- cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
if (VectorTy && !IsVector) {
auto *ArrayTy = llvm::ArrayType::get(
VectorTy->getElementType(),
@@ -2475,10 +2478,11 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
+ QualType PointeeType = RefLVal.getType()->getPointeeType();
CharUnits Align = CGM.getNaturalTypeAlignment(
- RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
+ PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
/* forPointeeType= */ true);
- return Address(Load, Align);
+ return Address(Load, ConvertTypeForMem(PointeeType), Align);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
@@ -2528,7 +2532,7 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
- Address Addr(V, Alignment);
+ Address Addr(V, RealVarTy, Alignment);
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
@@ -2610,7 +2614,7 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
llvm::Value *Ptr =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
- return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
+ return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
/// Determine whether we can emit a reference to \p VD from the current
@@ -2706,7 +2710,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
/* BaseInfo= */ nullptr,
/* TBAAInfo= */ nullptr,
/* forPointeeType= */ true);
- Addr = Address(Val, Alignment);
+ Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2783,9 +2787,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Otherwise, it might be static local we haven't emitted yet for
// some reason; most likely, because it's in an outer function.
} else if (VD->isStaticLocal()) {
- addr = Address(CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
- getContext().getDeclAlign(VD));
+ llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
+ addr = Address(
+ var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
// No other cases for now.
} else {
@@ -3586,7 +3591,7 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
if (inbounds) {
- return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
+ return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
CodeGenFunction::NotSubtraction, loc,
name);
} else {
@@ -3698,7 +3703,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
idx, DbgInfo);
}
- return Address(eltPtr, eltAlign);
+ return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
@@ -4380,8 +4385,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
hasAnyVptr(FieldType, getContext()))
// Because unions can easily skip invariant.barriers, we need to add
// a barrier every time a CXXRecord field with a vptr is referenced.
- addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
- addr.getAlignment());
+ addr = Builder.CreateLaunderInvariantGroup(addr);
if (IsInPreservedAIRegion ||
(getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
@@ -4539,10 +4543,10 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
// because it can't be used.
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
EmitCXXThrowExpr(ThrowExpr);
- llvm::Type *Ty =
- llvm::PointerType::getUnqual(ConvertType(dead->getType()));
+ llvm::Type *ElemTy = ConvertType(dead->getType());
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
return MakeAddrLValue(
- Address(llvm::UndefValue::get(Ty), CharUnits::One()),
+ Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
dead->getType());
}
return EmitLValue(live);
@@ -4584,11 +4588,13 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
EmitBlock(contBlock);
if (lhs && rhs) {
- llvm::PHINode *phi =
- Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue");
- phi->addIncoming(lhs->getPointer(*this), lhsBlock);
- phi->addIncoming(rhs->getPointer(*this), rhsBlock);
- Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
+ Address lhsAddr = lhs->getAddress(*this);
+ Address rhsAddr = rhs->getAddress(*this);
+ llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
+ phi->addIncoming(lhsAddr.getPointer(), lhsBlock);
+ phi->addIncoming(rhsAddr.getPointer(), rhsBlock);
+ Address result(phi, lhsAddr.getElementType(),
+ std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
AlignmentSource alignSource =
std::max(lhs->getBaseInfo().getAlignmentSource(),
rhs->getBaseInfo().getAlignmentSource());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 5b56a587fa5f..3b996b89a1d7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -301,7 +301,7 @@ void AggExprEmitter::withReturnValueSlot(
if (!UseTemp)
return;
- assert(Dest.getPointer() != Src.getAggregatePointer());
+ assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
EmitFinalDestCopy(E->getType(), Src);
if (!RequiresDestruction && LifetimeStartInst) {
@@ -493,7 +493,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
- llvm::Type *llvmElementType = begin->getType()->getPointerElementType();
+ llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
// Consider initializing the array by copying from a global. For this to be
// more efficient than per-element initialization, the size of the elements
@@ -513,7 +513,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
Emitter.finalize(GV);
CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
GV->setAlignment(Align.getAsAlign());
- EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
+ Address GVAddr(GV, GV->getValueType(), Align);
+ EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
return;
}
}
@@ -565,8 +566,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
- LValue elementLV =
- CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(element, llvmElementType, elementAlign), elementType);
EmitInitializationToLValue(E->getInit(i), elementLV);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index cc838bf38c6c..0571c498c377 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1052,13 +1052,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
InitListElements =
cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
->getSize().getZExtValue();
- CurPtr =
- Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
- CurPtr.getPointer(),
- Builder.getSize(InitListElements),
- "string.init.end"),
- CurPtr.getAlignment().alignmentAtOffset(InitListElements *
- ElementSize));
+ CurPtr = Builder.CreateConstInBoundsGEP(
+ CurPtr, InitListElements, "string.init.end");
// Zero out the rest, if any remain.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1135,7 +1130,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Switch back to initializing one base element at a time.
- CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
+ CurPtr = Builder.CreateElementBitCast(CurPtr, BeginPtr.getElementType());
}
// If all elements have already been initialized, skip any further
@@ -1594,7 +1589,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
- allocation = Address(allocation.getPointer(), allocAlign);
+ allocation = allocation.withAlignment(allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
@@ -1664,7 +1659,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
}
- allocation = Address(RV.getScalarVal(), allocationAlign);
+ allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
}
// Emit a null check on the allocation result if the allocation
@@ -1725,8 +1720,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// of optimization level.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
allocator->isReservedGlobalPlacementOperator())
- result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
- result.getAlignment());
+ result = Builder.CreateLaunderInvariantGroup(result);
// Emit sanitizer checks for pointer value now, so that in the case of an
// array it was checked only once and not at each constructor call. We may
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
index 2c3d01153cf9..baa52a96bc5e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
@@ -899,7 +899,7 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
if (llvm::GlobalVariable *Addr =
CGM.getAddrOfConstantCompoundLiteralIfEmitted(E))
- return ConstantAddress(Addr, Align);
+ return ConstantAddress(Addr, Addr->getValueType(), Align);
LangAS addressSpace = E->getType().getAddressSpace();
@@ -921,7 +921,7 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
emitter.finalize(GV);
GV->setAlignment(Align.getAsAlign());
CGM.setAddrOfConstantCompoundLiteral(E, GV);
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
}
static llvm::Constant *
@@ -1988,6 +1988,9 @@ ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) {
ConstantLValue
ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
unsigned builtin = E->getBuiltinCallee();
+ if (builtin == Builtin::BI__builtin_function_start)
+ return CGM.GetFunctionStart(
+ E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()));
if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
builtin != Builtin::BI__builtin___NSStringMakeConstantString)
return nullptr;
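The new case above lets __builtin_function_start fold to a constant function-entry pointer via CGM.GetFunctionStart. A hedged usage sketch of the builtin (exact semantics are target-dependent):

    void callee(void);
    // Constant pointer to the start of callee's code.
    const void *entry = __builtin_function_start(callee);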
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index ae9434f96529..e32462eb635c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1240,7 +1240,18 @@ Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
if (isa<llvm::IntegerType>(DstElementTy)) {
assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
- if (DstElementType->isSignedIntegerOrEnumerationType())
+ bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
+
+ // If we can't recognize overflow as undefined behavior, assume that
+ // overflow saturates. This protects against normal optimizations if we are
+ // compiling with non-standard FP semantics.
+ if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
+ llvm::Intrinsic::ID IID =
+ IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
+ return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
+ }
+
+ if (IsSigned)
return Builder.CreateFPToSI(Src, DstTy, "conv");
return Builder.CreateFPToUI(Src, DstTy, "conv");
}
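When strict float-cast overflow is disabled, the code above emits llvm.fptosi.sat/llvm.fptoui.sat instead of fptosi/fptoui, whose out-of-range results are poison. Reference semantics of the signed i32/f32 case, written as plain C++ for illustration (per the LangRef description of the intrinsic):

    #include <cmath>
    #include <cstdint>

    // Sketch: NaN maps to 0; out-of-range inputs clamp to the nearest bound.
    int32_t fptosi_sat_i32(float f) {
      if (std::isnan(f)) return 0;
      if (f <= -2147483648.0f) return INT32_MIN; // -2^31 is exactly representable
      if (f >= 2147483648.0f) return INT32_MAX;  // anything >= 2^31 saturates
      return (int32_t)f;                         // in range: truncate toward zero
    }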
@@ -2631,12 +2642,12 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
+ llvm::Type *elemTy = value->getType()->getPointerElementType();
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value->getType()->getPointerElementType(),
- value, numElts, "vla.inc");
+ value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
else
value = CGF.EmitCheckedInBoundsGEP(
- value, numElts, /*SignedIndices=*/false, isSubtraction,
+ elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
E->getExprLoc(), "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
@@ -2647,7 +2658,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
+ value = CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
+ /*SignedIndices=*/false,
isSubtraction, E->getExprLoc(),
"incdec.funcptr");
value = Builder.CreateBitCast(value, input->getType());
@@ -2655,13 +2667,13 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value->getType()->getPointerElementType(),
- value, amt, "incdec.ptr");
+ value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
- isSubtraction, E->getExprLoc(),
- "incdec.ptr");
+ value = CGF.EmitCheckedInBoundsGEP(
+ elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.ptr");
}
// Vector increment/decrement.
@@ -2771,9 +2783,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
- /*SignedIndices=*/false, isSubtraction,
- E->getExprLoc(), "incdec.objptr");
+ value = CGF.EmitCheckedInBoundsGEP(
+ CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
@@ -3508,16 +3520,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
+ llvm::Type *elemTy = pointer->getType()->getPointerElementType();
if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(
- pointer->getType()->getPointerElementType(), pointer, index,
- "add.ptr");
+ pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
} else {
index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer =
- CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
- op.E->getExprLoc(), "add.ptr");
+ pointer = CGF.EmitCheckedInBoundsGEP(
+ elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
+ "add.ptr");
}
return pointer;
}
@@ -3531,12 +3542,13 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(elementType);
if (CGF.getLangOpts().isSignedOverflowDefined())
- return CGF.Builder.CreateGEP(
- pointer->getType()->getPointerElementType(), pointer, index, "add.ptr");
+ return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
- return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
- op.E->getExprLoc(), "add.ptr");
+ return CGF.EmitCheckedInBoundsGEP(
+ elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
+ "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -5057,12 +5069,12 @@ static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
}
Value *
-CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
+ ArrayRef<Value *> IdxList,
bool SignedIndices, bool IsSubtraction,
SourceLocation Loc, const Twine &Name) {
llvm::Type *PtrTy = Ptr->getType();
- Value *GEPVal = Builder.CreateInBoundsGEP(
- PtrTy->getPointerElementType(), Ptr, IdxList, Name);
+ Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
// If the pointer overflow sanitizer isn't enabled, do nothing.
if (!SanOpts.has(SanitizerKind::PointerOverflow))
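Call sites now supply the GEP element type up front instead of letting EmitCheckedInBoundsGEP pull it off the pointer. A hedged call-site sketch matching the updated signature (all values assumed to be in scope):

    llvm::Value *NewPtr = CGF.EmitCheckedInBoundsGEP(
        ElemTy, BasePtr, {Index}, /*SignedIndices=*/true,
        CodeGenFunction::NotSubtraction, Loc, "sketch.gep");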
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index ad505fc5a0d4..e3b0e069b830 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -366,11 +366,11 @@ template <class Derived> struct GenFuncBase {
llvm::ConstantInt::get(NumElts->getType(), BaseEltSize);
llvm::Value *SizeInBytes =
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
- Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy);
+ Address BC = CGF.Builder.CreateElementBitCast(DstAddr, CGF.CGM.Int8Ty);
llvm::Value *DstArrayEnd =
CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BC.getPointer(), SizeInBytes);
- DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy,
- "dstarray.end");
+ DstArrayEnd = CGF.Builder.CreateBitCast(
+ DstArrayEnd, CGF.CGM.Int8PtrPtrTy, "dstarray.end");
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
// Create the header block and insert the phi instructions.
@@ -426,9 +426,9 @@ template <class Derived> struct GenFuncBase {
assert(Addr.isValid() && "invalid address");
if (Offset.getQuantity() == 0)
return Addr;
- Addr = CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrTy);
+ Addr = CGF->Builder.CreateElementBitCast(Addr, CGF->CGM.Int8Ty);
Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity());
- return CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrPtrTy);
+ return CGF->Builder.CreateElementBitCast(Addr, CGF->CGM.Int8PtrTy);
}
Address getAddrWithOffset(Address Addr, CharUnits StructFieldOffset,
@@ -491,9 +491,8 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
Alignments[I] = Addrs[I].getAlignment();
- Ptrs[I] =
- CallerCGF.Builder.CreateBitCast(Addrs[I], CallerCGF.CGM.Int8PtrPtrTy)
- .getPointer();
+ Ptrs[I] = CallerCGF.Builder.CreateElementBitCast(
+ Addrs[I], CallerCGF.CGM.Int8PtrTy).getPointer();
}
if (llvm::Function *F =
@@ -554,19 +553,21 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
return;
QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
- llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo();
+ llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
LValue DstBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateBitCast(DstAddr, PtrTy), FT);
+ this->CGF->Builder.CreateElementBitCast(DstAddr, Ty), FT);
DstLV = this->CGF->EmitLValueForField(DstBase, FD);
Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], Offset);
LValue SrcBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateBitCast(SrcAddr, PtrTy), FT);
+ this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty), FT);
SrcLV = this->CGF->EmitLValueForField(SrcBase, FD);
} else {
- llvm::PointerType *Ty = this->CGF->ConvertTypeForMem(FT)->getPointerTo();
- Address DstAddr = this->CGF->Builder.CreateBitCast(Addrs[DstIdx], Ty);
- Address SrcAddr = this->CGF->Builder.CreateBitCast(Addrs[SrcIdx], Ty);
+ llvm::Type *Ty = this->CGF->ConvertTypeForMem(FT);
+ Address DstAddr =
+ this->CGF->Builder.CreateElementBitCast(Addrs[DstIdx], Ty);
+ Address SrcAddr =
+ this->CGF->Builder.CreateElementBitCast(Addrs[SrcIdx], Ty);
DstLV = this->CGF->MakeAddrLValue(DstAddr, FT);
SrcLV = this->CGF->MakeAddrLValue(SrcAddr, FT);
}
@@ -817,7 +818,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
Address DstPtr =
- Builder.CreateBitCast(Dst.getAddress(*this), CGM.Int8PtrPtrTy);
+ Builder.CreateElementBitCast(Dst.getAddress(*this), CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -830,7 +831,7 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
std::array<Address, N> Addrs) {
auto SetArtificialLoc = ApplyDebugLocation::CreateArtificial(CGF);
for (unsigned I = 0; I < N; ++I)
- Addrs[I] = CGF.Builder.CreateBitCast(Addrs[I], CGF.CGM.Int8PtrPtrTy);
+ Addrs[I] = CGF.Builder.CreateElementBitCast(Addrs[I], CGF.CGM.Int8PtrTy);
QT = IsVolatile ? QT.withVolatile() : QT;
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index ac26f0d4232c..b5bcf157036d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -3915,8 +3915,8 @@ static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
Args.push_back(
llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.getValueOr(0)));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.getValueOr(0)));
};
assert(!Version.empty() && "unexpected empty version");
@@ -3952,8 +3952,8 @@ CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
llvm::Value *Args[] = {
llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
- llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0),
- llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0),
+ llvm::ConstantInt::get(CGM.Int32Ty, Min.getValueOr(0)),
+ llvm::ConstantInt::get(CGM.Int32Ty, SMin.getValueOr(0))
};
llvm::Value *CallRes =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index e016644150b4..b2bf60d2c0fc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -978,7 +978,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(old->getValue(), Align);
+ return ConstantAddress(
+ old->getValue(), old->getValue()->getType()->getPointerElementType(),
+ Align);
bool isNonASCII = SL->containsNonAscii();
@@ -1000,7 +1002,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *ObjCStr = llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(Int64Ty, str), IdTy);
ObjCStrings[Str] = ObjCStr;
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, IdTy->getPointerElementType(), Align);
}
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -1114,7 +1116,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, IdTy->getPointerElementType(), Align);
}
void PushProperty(ConstantArrayBuilder &PropertiesArray,
@@ -2476,7 +2478,7 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(old->getValue(), Align);
+ return ConstantAddress(old->getValue(), Int8Ty, Align);
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -2503,7 +2505,7 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, Int8Ty, Align);
}
/// Generates a message send where the super is the receiver. This is a message
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index 5b925359ac25..425d1a793439 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -1983,7 +1983,8 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
GetConstantStringEntry(NSConstantStringMap, Literal, StringLength);
if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+ return ConstantAddress(
+ C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
// If we don't already have it, get _NSConstantStringClassReference.
llvm::Constant *Class = getNSConstantStringClassRef();
@@ -2036,7 +2037,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
: NSStringSection);
Entry.second = GV;
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
enum {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 75709b3c7e78..e35c15421520 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -687,8 +688,6 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
// Drill down to the base element type on both arrays.
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
- DestAddr =
- CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
if (DRD)
SrcAddr =
CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
@@ -775,7 +774,7 @@ LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
}
void ReductionCodeGen::emitAggregateInitialization(
- CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
+ CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD) {
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
@@ -788,7 +787,7 @@ void ReductionCodeGen::emitAggregateInitialization(
EmitDeclareReductionInit,
EmitDeclareReductionInit ? ClausesData[N].ReductionOp
: PrivateVD->getInit(),
- DRD, SharedLVal.getAddress(CGF));
+ DRD, SharedAddr);
}
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
@@ -882,7 +881,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
}
void ReductionCodeGen::emitInitialization(
- CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
+ CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
assert(SharedAddresses.size() > N && "No variable was generated");
const auto *PrivateVD =
@@ -892,21 +891,15 @@ void ReductionCodeGen::emitInitialization(
QualType PrivateType = PrivateVD->getType();
PrivateAddr = CGF.Builder.CreateElementBitCast(
PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
- QualType SharedType = SharedAddresses[N].first.getType();
- SharedLVal = CGF.MakeAddrLValue(
- CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(SharedType)),
- SharedType, SharedAddresses[N].first.getBaseInfo(),
- CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
if (DRD && DRD->getInitializer())
(void)DefaultInit(CGF);
- emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
+ emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
} else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
(void)DefaultInit(CGF);
+ QualType SharedType = SharedAddresses[N].first.getType();
emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
- PrivateAddr, SharedLVal.getAddress(CGF),
- SharedLVal.getType());
+ PrivateAddr, SharedAddr, SharedType);
} else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
!CGF.isTrivialInitializer(PrivateVD->getInit())) {
CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
@@ -2016,12 +2009,13 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
StringRef Name) {
std::string Suffix = getName({"artificial", ""});
llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
- llvm::Value *GAddr =
+ llvm::GlobalVariable *GAddr =
getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
CGM.getTarget().isTLSSupported()) {
- cast<llvm::GlobalVariable>(GAddr)->setThreadLocal(/*Val=*/true);
- return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType));
+ GAddr->setThreadLocal(/*Val=*/true);
+ return Address(GAddr, GAddr->getValueType(),
+ CGM.getContext().getTypeAlignInChars(VarType));
}
std::string CacheSuffix = getName({"cache", ""});
llvm::Value *Args[] = {
@@ -2084,7 +2078,8 @@ void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
@@ -2175,7 +2170,7 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
return ThreadIDTemp;
}
-llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
+llvm::GlobalVariable *CGOpenMPRuntime::getOrCreateInternalVariable(
llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
@@ -2183,7 +2178,7 @@ llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
StringRef RuntimeName = Out.str();
auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
if (Elem.second) {
- assert(Elem.second->getType()->getPointerElementType() == Ty &&
+ assert(Elem.second->getType()->isOpaqueOrPointeeTypeMatches(Ty) &&
"OMP internal variable has different type than requested");
return &*Elem.second;
}
@@ -4498,10 +4493,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
LValue Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(AffinitiesArray.getElementType(),
- AffinitiesArray.getPointer(), Idx),
- AffinitiesArray.getAlignment()),
- KmpTaskAffinityInfoTy);
+ CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
// affs[i].base_addr = &<Affinities[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
@@ -4665,12 +4657,10 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
@@ -4706,10 +4696,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
- DependenciesArray.getPointer(), Idx),
- DependenciesArray.getAlignment()),
- KmpDependInfoTy);
+ CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
}
// deps[i].base_addr = &<Dependencies[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
@@ -4766,12 +4753,10 @@ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
Base.getAddress(CGF), KmpDependInfoPtrT);
Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
@@ -4827,12 +4812,10 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
Base.getTBAAInfo());
// Get number of elements in a single depobj.
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
@@ -4844,10 +4827,7 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
ElSize,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- Address DepAddr =
- Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
- DependenciesArray.getPointer(), Pos),
- DependenciesArray.getAlignment());
+ Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
// Increase pos.
@@ -5929,25 +5909,20 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
- LValue OrigLVal;
+ Address OrigAddr = Address::invalid();
// If the initializer uses the initializer from the declare reduction
// construct, emit a pointer to the address of the original reduction item
// (required by the reduction initializer).
if (RCG.usesReductionInitializer(N)) {
Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
- SharedAddr = CGF.EmitLoadOfPointer(
+ OrigAddr = CGF.EmitLoadOfPointer(
SharedAddr,
CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
- OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
- } else {
- OrigLVal = CGF.MakeNaturalAlignAddrLValue(
- llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
- CGM.getContext().VoidPtrTy);
}
// Emit the initializer:
// %0 = bitcast void* %arg to <type>*
// store <type> <init>, <type>* %0
- RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
+ RCG.emitInitialization(CGF, N, PrivateAddr, OrigAddr,
[](CodeGenFunction &) { return false; });
CGF.FinishFunction();
return Fn;
@@ -6122,7 +6097,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
- TaskRedInput.getPointer(), Idxs,
+ TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
/*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
".rd_input.gep.");
LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
@@ -6620,6 +6595,8 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
OutlinedFn->addFnAttr("omp_target_thread_limit",
std::to_string(DefaultValThreads));
}
+
+ CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
}
/// Checks if the expression is constant or does not have non-trivial function
@@ -12680,12 +12657,11 @@ void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
- llvm::Constant *Last = getOrCreateInternalVariable(
+ llvm::GlobalVariable *Last = getOrCreateInternalVariable(
CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
- cast<llvm::GlobalVariable>(Last)->setAlignment(
- LVal.getAlignment().getAsAlign());
- LValue LastLVal =
- CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
+ Last->setAlignment(LVal.getAlignment().getAsAlign());
+ LValue LastLVal = CGF.MakeAddrLValue(
+ Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
// Global loop counter. Required to handle inner parallel-for regions.
// iv
@@ -12812,7 +12788,7 @@ void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
for (const auto &Pair : It->DeclToUniqueName) {
const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
- if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
+ if (!CS->capturesVariable(VD) || IgnoredDecls.contains(VD))
continue;
auto I = LPCI->getSecond().find(Pair.first);
assert(I != LPCI->getSecond().end() &&
@@ -12858,7 +12834,8 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
if (!GV)
return;
LValue LPLVal = CGF.MakeAddrLValue(
- GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
+ Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
+ PrivLVal.getType().getNonReferenceType());
llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
CGF.EmitStoreOfScalar(Res, PrivLVal);
}
@@ -12887,7 +12864,8 @@ void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
llvm_unreachable("Not supported in SIMD-only mode");
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index 527a23a8af6a..b83ec78696d1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -162,10 +162,10 @@ private:
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
- /// \param SharedLVal Address of the original shared variable.
+ /// \param SharedAddr Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
- Address PrivateAddr, LValue SharedLVal,
+ Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD);
public:
@@ -187,10 +187,10 @@ public:
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
- /// \param SharedLVal Address of the original shared variable.
+ /// \param SharedAddr Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
- LValue SharedLVal,
+ Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
@@ -471,8 +471,8 @@ private:
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
- llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
- InternalVars;
+ llvm::StringMap<llvm::AssertingVH<llvm::GlobalVariable>,
+ llvm::BumpPtrAllocator> InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
@@ -829,9 +829,9 @@ private:
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
- llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
- const llvm::Twine &Name,
- unsigned AddressSpace = 0);
+ llvm::GlobalVariable *getOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name,
+ unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
@@ -1015,11 +1015,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
+ const Expr *IfCond, llvm::Value *NumThreads);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -1991,11 +1993,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
+ const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index dcb224f33156..866454ddeaed 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1221,11 +1221,7 @@ void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
- // Do nothing in case of SPMD mode and L0 parallel.
- if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
- return;
-
- CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
+ // Nothing to do.
}
void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
@@ -1510,13 +1506,16 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
- auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
- IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
+ NumThreads](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
CGBuilderTy &Bld = CGF.Builder;
+ llvm::Value *NumThreadsVal = NumThreads;
llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
if (WFn)
@@ -1556,13 +1555,18 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
else
IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
- assert(IfCondVal && "Expected a value");
+ if (!NumThreadsVal)
+ NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
+ else
+ NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty);
+
+ assert(IfCondVal && "Expected a value");
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *Args[] = {
RTLoc,
getThreadID(CGF, Loc),
IfCondVal,
- llvm::ConstantInt::get(CGF.Int32Ty, -1),
+ NumThreadsVal,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
@@ -2186,11 +2190,8 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// elemptr = ((CopyType*)(elemptrptr)) + I
Address ElemPtr = Address(ElemPtrPtr, Align);
ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
- if (NumIters > 1) {
- ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(),
- ElemPtr.getPointer(), Cnt),
- ElemPtr.getAlignment());
- }
+ if (NumIters > 1)
+ ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
// Get pointer to location in transfer medium.
// MediumPtr = &medium[warp_id]
@@ -2256,11 +2257,8 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
Address TargetElemPtr = Address(TargetElemPtrVal, Align);
TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
- if (NumIters > 1) {
- TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(),
- TargetElemPtr.getPointer(), Cnt),
- TargetElemPtr.getAlignment());
- }
+ if (NumIters > 1)
+ TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
// *TargetElemPtr = SrcMediumVal;
llvm::Value *SrcMediumValue =
@@ -3899,6 +3897,7 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::Generic:
case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
break;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index ac51264d7685..1d30c5061743 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -257,10 +257,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
+ const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index d399ff919cc3..ef0068cd3b0c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -2454,7 +2454,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
if (RetAI.isDirect() || RetAI.isExtend()) {
// Make a fake lvalue for the return value slot.
- LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
+ LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
*this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
ResultRegDests, AsmString, S.getNumOutputs());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index f6853a22cd36..4c11f7d67534 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,10 +24,13 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
using namespace CodeGen;
@@ -375,8 +378,7 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
Address TmpAddr =
- CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
- .getAddress(CGF);
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
return TmpAddr;
}
@@ -1245,7 +1247,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
- RedCG.getSharedLValue(Count),
+ RedCG.getSharedLValue(Count).getAddress(*this),
[&Emission](CodeGenFunction &CGF) {
CGF.EmitAutoVarInit(Emission);
return true;
@@ -1557,14 +1559,14 @@ static void emitCommonOMPParallelDirective(
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
+ llvm::Value *NumThreads = nullptr;
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- llvm::Value *NumThreads =
- CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
- /*IgnoreResultAssign=*/true);
+ NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
+ /*IgnoreResultAssign=*/true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
CGF, NumThreads, NumThreadsClause->getBeginLoc());
}
@@ -1591,7 +1593,7 @@ static void emitCommonOMPParallelDirective(
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
- CapturedVars, IfCond);
+ CapturedVars, IfCond, NumThreads);
}
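The NumThreads value forwarded above is the evaluated argument of a num_threads clause, when one is present; a sketch of the source construct this lowers:

    // Both clauses feed emitParallelCall: IfCond from 'if', NumThreads from
    // 'num_threads' (nullptr when the clause is absent).
    #pragma omp parallel if(cond) num_threads(4)
    { /* outlined body */ }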
static bool isAllocatableDecl(const VarDecl *VD) {
@@ -1972,7 +1974,7 @@ CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
// Pop the \p Depth loops requested by the call from that stack and restore
// the previous context.
- OMPLoopNestStack.set_size(OMPLoopNestStack.size() - Depth);
+ OMPLoopNestStack.pop_back_n(Depth);
ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
return Result;
@@ -4299,10 +4301,10 @@ public:
PrivateDecls.push_back(VD);
}
}
- void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
- void VisitCapturedStmt(const CapturedStmt *) { return; }
- void VisitLambdaExpr(const LambdaExpr *) { return; }
- void VisitBlockExpr(const BlockExpr *) { return; }
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
+ void VisitCapturedStmt(const CapturedStmt *) {}
+ void VisitLambdaExpr(const LambdaExpr *) {}
+ void VisitBlockExpr(const BlockExpr *) {}
void VisitStmt(const Stmt *S) {
if (!S)
return;
@@ -4431,6 +4433,53 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
UntiedLocalVars;
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
+ // Generate debug info for variables present in shared clause.
+ if (auto *DI = CGF.getDebugInfo()) {
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
+ CGF.CapturedStmtInfo->getCaptureFields();
+ llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
+ if (CaptureFields.size() && ContextValue) {
+ unsigned CharWidth = CGF.getContext().getCharWidth();
+ // The shared variables are packed together as members of a structure, so
+ // the address of each shared variable can be computed by adding its
+ // offset within the record to the base address of the record. For each
+ // shared variable, a debug intrinsic llvm.dbg.declare is generated with
+ // an appropriate expression (DIExpression).
+ // Ex:
+ // %12 = load %struct.anon*, %struct.anon** %__context.addr.i
+ // call void @llvm.dbg.declare(metadata %struct.anon* %12,
+ // metadata !svar1,
+ // metadata !DIExpression(DW_OP_deref))
+ // call void @llvm.dbg.declare(metadata %struct.anon* %12,
+ // metadata !svar2,
+ // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
+ for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
+ const VarDecl *SharedVar = It->first;
+ RecordDecl *CaptureRecord = It->second->getParent();
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(CaptureRecord);
+ unsigned Offset =
+ Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
+ (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
+ CGF.Builder, false);
+ llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
+ // Get the dbg.declare call we just created and update its
+ // DIExpression to add the offset to the base address.
+ if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
+ SmallVector<uint64_t, 8> Ops;
+ // Add offset to the base address if non zero.
+ if (Offset) {
+ Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Ops.push_back(Offset);
+ }
+ Ops.push_back(llvm::dwarf::DW_OP_deref);
+ auto &Ctx = DDI->getContext();
+ llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
+ Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
+ }
+ }
+ }
+ }
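A source-level sketch of the situation the block above handles, with an assumed record layout: two variables captured into one context structure, each described by a DIExpression relative to the record's base:

    int svar1;  // offset 0 (assumed)  -> DIExpression(DW_OP_deref)
    long svar2; // offset 8 (assumed)  -> DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref)
    #pragma omp task shared(svar1, svar2)
    { svar2 += svar1; }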
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
@@ -5918,6 +5967,9 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
+ case OMPC_compare:
+ // Do nothing here as we already emit an error.
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
index 4b39a0520833..f01eece042f8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
@@ -47,6 +47,8 @@ class RValue {
llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
// Stores second value and volatility.
llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+ // Stores element type for aggregate values.
+ llvm::Type *ElementType;
public:
bool isScalar() const { return V1.getInt() == Scalar; }
@@ -71,7 +73,8 @@ public:
Address getAggregateAddress() const {
assert(isAggregate() && "Not an aggregate!");
auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
- return Address(V1.getPointer(), CharUnits::fromQuantity(align));
+ return Address(
+ V1.getPointer(), ElementType, CharUnits::fromQuantity(align));
}
llvm::Value *getAggregatePointer() const {
assert(isAggregate() && "Not an aggregate!");
@@ -108,6 +111,7 @@ public:
RValue ER;
ER.V1.setPointer(addr.getPointer());
ER.V1.setInt(Aggregate);
+ ER.ElementType = addr.getElementType();
auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
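With the stored element type, an aggregate RValue can rebuild a fully typed Address on the way out. A condensed round-trip sketch (addr assumed to be a valid Address):

    RValue RV = RValue::getAggregate(addr);   // captures pointer, alignment, element type
    Address Back = RV.getAggregateAddress();  // reconstructs Address(ptr, ElementType, align)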
@@ -175,6 +179,7 @@ class LValue {
} LVType;
llvm::Value *V;
+ llvm::Type *ElementType;
union {
// Index into a vector subscript: V[i]
@@ -230,6 +235,13 @@ private:
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
assert((!Alignment.isZero() || Type->isIncompleteType()) &&
"initializing l-value with zero alignment!");
+ if (isGlobalReg())
+ assert(ElementType == nullptr && "Global reg does not store elem type");
+ else
+ assert(llvm::cast<llvm::PointerType>(V->getType())
+ ->isOpaqueOrPointeeTypeMatches(ElementType) &&
+ "Pointer element type mismatch");
+
this->Type = Type;
this->Quals = Quals;
const unsigned MaxAlign = 1U << 31;
@@ -327,17 +339,18 @@ public:
return V;
}
Address getAddress(CodeGenFunction &CGF) const {
- return Address(getPointer(CGF), getAlignment());
+ return Address(getPointer(CGF), ElementType, getAlignment());
}
void setAddress(Address address) {
assert(isSimple());
V = address.getPointer();
+ ElementType = address.getElementType();
Alignment = address.getAlignment().getQuantity();
}
// vector elt lvalue
Address getVectorAddress() const {
- return Address(getVectorPointer(), getAlignment());
+ return Address(getVectorPointer(), ElementType, getAlignment());
}
llvm::Value *getVectorPointer() const {
assert(isVectorElt());
@@ -349,7 +362,7 @@ public:
}
Address getMatrixAddress() const {
- return Address(getMatrixPointer(), getAlignment());
+ return Address(getMatrixPointer(), ElementType, getAlignment());
}
llvm::Value *getMatrixPointer() const {
assert(isMatrixElt());
@@ -362,7 +375,7 @@ public:
// extended vector elements.
Address getExtVectorAddress() const {
- return Address(getExtVectorPointer(), getAlignment());
+ return Address(getExtVectorPointer(), ElementType, getAlignment());
}
llvm::Value *getExtVectorPointer() const {
assert(isExtVectorElt());
@@ -375,7 +388,7 @@ public:
// bitfield lvalue
Address getBitFieldAddress() const {
- return Address(getBitFieldPointer(), getAlignment());
+ return Address(getBitFieldPointer(), ElementType, getAlignment());
}
llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
const CGBitFieldInfo &getBitFieldInfo() const {
@@ -395,6 +408,7 @@ public:
R.LVType = Simple;
assert(address.getPointer()->getType()->isPointerTy());
R.V = address.getPointer();
+ R.ElementType = address.getElementType();
R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo);
return R;
}
@@ -405,6 +419,7 @@ public:
LValue R;
R.LVType = VectorElt;
R.V = vecAddress.getPointer();
+ R.ElementType = vecAddress.getElementType();
R.VectorIdx = Idx;
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
@@ -417,6 +432,7 @@ public:
LValue R;
R.LVType = ExtVectorElt;
R.V = vecAddress.getPointer();
+ R.ElementType = vecAddress.getElementType();
R.VectorElts = Elts;
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
@@ -435,17 +451,20 @@ public:
LValue R;
R.LVType = BitField;
R.V = Addr.getPointer();
+ R.ElementType = Addr.getElementType();
R.BitFieldInfo = &Info;
R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo,
TBAAInfo);
return R;
}
- static LValue MakeGlobalReg(Address Reg, QualType type) {
+ static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment,
+ QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = Reg.getPointer();
- R.Initialize(type, type.getQualifiers(), Reg.getAlignment(),
+ R.V = V;
+ R.ElementType = nullptr;
+ R.Initialize(type, type.getQualifiers(), alignment,
LValueBaseInfo(AlignmentSource::Decl), TBAAAccessInfo());
return R;
}
@@ -456,6 +475,7 @@ public:
LValue R;
R.LVType = MatrixElt;
R.V = matAddress.getPointer();
+ R.ElementType = matAddress.getElementType();
R.VectorIdx = Idx;
R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
BaseInfo, TBAAInfo);
@@ -470,13 +490,11 @@ public:
/// An aggregate value slot.
class AggValueSlot {
/// The address.
- llvm::Value *Addr;
+ Address Addr;
// Qualifiers
Qualifiers Quals;
- unsigned Alignment;
-
/// DestructedFlag - This is set to true if some external code is
/// responsible for setting up a destructor for the slot. Otherwise
/// the code which constructs it should push the appropriate cleanup.
@@ -520,6 +538,14 @@ class AggValueSlot {
/// them.
bool SanitizerCheckedFlag : 1;
+ AggValueSlot(Address Addr, Qualifiers Quals, bool DestructedFlag,
+ bool ObjCGCFlag, bool ZeroedFlag, bool AliasedFlag,
+ bool OverlapFlag, bool SanitizerCheckedFlag)
+ : Addr(Addr), Quals(Quals), DestructedFlag(DestructedFlag),
+ ObjCGCFlag(ObjCGCFlag), ZeroedFlag(ZeroedFlag),
+ AliasedFlag(AliasedFlag), OverlapFlag(OverlapFlag),
+ SanitizerCheckedFlag(SanitizerCheckedFlag) {}
+
public:
enum IsAliased_t { IsNotAliased, IsAliased };
enum IsDestructed_t { IsNotDestructed, IsDestructed };
@@ -553,22 +579,8 @@ public:
Overlap_t mayOverlap,
IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
- AggValueSlot AV;
- if (addr.isValid()) {
- AV.Addr = addr.getPointer();
- AV.Alignment = addr.getAlignment().getQuantity();
- } else {
- AV.Addr = nullptr;
- AV.Alignment = 0;
- }
- AV.Quals = quals;
- AV.DestructedFlag = isDestructed;
- AV.ObjCGCFlag = needsGC;
- AV.ZeroedFlag = isZeroed;
- AV.AliasedFlag = isAliased;
- AV.OverlapFlag = mayOverlap;
- AV.SanitizerCheckedFlag = isChecked;
- return AV;
+ return AggValueSlot(addr, quals, isDestructed, needsGC, isZeroed, isAliased,
+ mayOverlap, isChecked);
}
static AggValueSlot
@@ -609,19 +621,19 @@ public:
}
llvm::Value *getPointer() const {
- return Addr;
+ return Addr.getPointer();
}
Address getAddress() const {
- return Address(Addr, getAlignment());
+ return Addr;
}
bool isIgnored() const {
- return Addr == nullptr;
+ return !Addr.isValid();
}
CharUnits getAlignment() const {
- return CharUnits::fromQuantity(Alignment);
+ return Addr.getAlignment();
}
IsAliased_t isPotentiallyAliased() const {
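
Throughout this CGValue.h change, RValue, LValue, and AggValueSlot start carrying the pointee type alongside the pointer, because with opaque pointers CodeGen can no longer recover it from the pointer's LLVM type. A rough sketch of the invariant the new ElementType member is asserted against (the helper name is illustrative):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/Casting.h"
    #include <cassert>

    // During the opaque-pointer transition a typed pointer must agree
    // with the cached element type; an opaque pointer trivially matches.
    static void checkElementType(llvm::Value *V, llvm::Type *ElemTy) {
      auto *PtrTy = llvm::cast<llvm::PointerType>(V->getType());
      assert(PtrTy->isOpaqueOrPointeeTypeMatches(ElemTy) &&
             "pointer element type mismatch");
      (void)PtrTy;
    }
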
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index 52c54d3c7a72..b72b16cf2b5f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -571,7 +571,6 @@ void BackendConsumer::SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &DI) {
// If Loc is invalid, we still need to report the issue, it just gets no
// location info.
Diags.Report(Loc, DiagID).AddString(Message);
- return;
}
bool
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index d87cf2d49720..e6adec6948af 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -188,8 +188,8 @@ LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
- TBAAInfo);
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}
/// Given a value of type T* that may not be to a complete object,
@@ -200,7 +200,8 @@ CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
TBAAAccessInfo TBAAInfo;
CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
/* forPointeeType= */ true);
- return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
+ Address Addr(V, ConvertTypeForMem(T), Align);
+ return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}
@@ -243,7 +244,7 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
case Type::Enum:
case Type::ObjCObjectPointer:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
return TEK_Scalar;
// Complexes.
@@ -1070,7 +1071,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
+ ReturnValue = Address(&*AI, ConvertType(RetTy),
+ CurFnInfo->getReturnInfo().getIndirectAlign());
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
ReturnValuePointer =
CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
@@ -1298,47 +1300,44 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo) {
+ assert(Fn && "generating code for null Function");
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
CurGD = GD;
FunctionArgList Args;
QualType ResTy = BuildFunctionArgList(GD, Args);
- // When generating code for a builtin with an inline declaration, use a
- // mangled name to hold the actual body, while keeping an external definition
- // in case the function pointer is referenced somewhere.
- if (Fn) {
- if (FD->isInlineBuiltinDeclaration()) {
- std::string FDInlineName = (Fn->getName() + ".inline").str();
- llvm::Module *M = Fn->getParent();
- llvm::Function *Clone = M->getFunction(FDInlineName);
- if (!Clone) {
- Clone = llvm::Function::Create(Fn->getFunctionType(),
- llvm::GlobalValue::InternalLinkage,
- Fn->getAddressSpace(), FDInlineName, M);
- Clone->addFnAttr(llvm::Attribute::AlwaysInline);
- }
- Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
- Fn = Clone;
+ if (FD->isInlineBuiltinDeclaration()) {
+ // When generating code for a builtin with an inline declaration, use a
+ // mangled name to hold the actual body, while keeping an external
+ // definition in case the function pointer is referenced somewhere.
+ std::string FDInlineName = (Fn->getName() + ".inline").str();
+ llvm::Module *M = Fn->getParent();
+ llvm::Function *Clone = M->getFunction(FDInlineName);
+ if (!Clone) {
+ Clone = llvm::Function::Create(Fn->getFunctionType(),
+ llvm::GlobalValue::InternalLinkage,
+ Fn->getAddressSpace(), FDInlineName, M);
+ Clone->addFnAttr(llvm::Attribute::AlwaysInline);
}
-
+ Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ Fn = Clone;
+ } else {
// Detect the unusual situation where an inline version is shadowed by a
// non-inline version. In that case we should pick the external one
// everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
// to detect that situation before we reach codegen, so do some late
// replacement.
- else {
- for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
- PD = PD->getPreviousDecl()) {
- if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
- std::string FDInlineName = (Fn->getName() + ".inline").str();
- llvm::Module *M = Fn->getParent();
- if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
- Clone->replaceAllUsesWith(Fn);
- Clone->eraseFromParent();
- }
- break;
+ for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
+ PD = PD->getPreviousDecl()) {
+ if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
+ std::string FDInlineName = (Fn->getName() + ".inline").str();
+ llvm::Module *M = Fn->getParent();
+ if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
+ Clone->replaceAllUsesWith(Fn);
+ Clone->eraseFromParent();
}
+ break;
}
}
}
@@ -1347,8 +1346,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (FD->hasAttr<NoDebugAttr>()) {
// Clear non-distinct debug info that was possibly attached to the function
// due to an earlier declaration without the nodebug attribute
- if (Fn)
- Fn->setSubprogram(nullptr);
+ Fn->setSubprogram(nullptr);
// Disable debug info indefinitely for this function
DebugInfo = nullptr;
}
@@ -2202,12 +2200,13 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Record:
case Type::Enum:
case Type::Elaborated:
+ case Type::Using:
case Type::TemplateSpecialization:
case Type::ObjCTypeParam:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
- case Type::ExtInt:
+ case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
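
The GenerateCode change above simplifies the inline-builtin handling now that Fn is asserted to be non-null. The underlying pattern, finding or creating an internal "<name>.inline" clone that holds the body while the original symbol stays externally visible, can be sketched roughly as (assumes a valid Fn; the helper name is illustrative):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"

    // Find or create the internal "<name>.inline" clone that holds the
    // builtin's body, leaving the original symbol external.
    static llvm::Function *getOrCreateInlineClone(llvm::Function *Fn) {
      std::string Name = (Fn->getName() + ".inline").str();
      llvm::Module *M = Fn->getParent();
      if (llvm::Function *Clone = M->getFunction(Name))
        return Clone;
      llvm::Function *Clone = llvm::Function::Create(
          Fn->getFunctionType(), llvm::GlobalValue::InternalLinkage,
          Fn->getAddressSpace(), Name, M);
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
      return Clone;
    }
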
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index ff5b6634da1c..f76ce8a6400d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -459,6 +459,11 @@ public:
/// Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
+ /// Get the CaptureFields
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
+ return CaptureFields;
+ }
+
private:
/// The kind of captured statement being generated.
CapturedRegionKind Kind;
@@ -2494,14 +2499,16 @@ public:
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
}
- LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
- LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- BaseInfo, TBAAInfo);
+ LValue
+ MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
+ AlignmentSource Source = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ TBAAAccessInfo());
}
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
@@ -3128,15 +3135,18 @@ public:
class ParamValue {
llvm::Value *Value;
+ llvm::Type *ElementType;
unsigned Alignment;
- ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ ParamValue(llvm::Value *V, llvm::Type *T, unsigned A)
+ : Value(V), ElementType(T), Alignment(A) {}
public:
static ParamValue forDirect(llvm::Value *value) {
- return ParamValue(value, 0);
+ return ParamValue(value, nullptr, 0);
}
static ParamValue forIndirect(Address addr) {
assert(!addr.getAlignment().isZero());
- return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ return ParamValue(addr.getPointer(), addr.getElementType(),
+ addr.getAlignment().getQuantity());
}
bool isIndirect() const { return Alignment != 0; }
@@ -3149,7 +3159,7 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, CharUnits::fromQuantity(Alignment));
+ return Address(Value, ElementType, CharUnits::fromQuantity(Alignment));
}
};
@@ -4405,7 +4415,7 @@ public:
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
- void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
bool PerformInit);
llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
@@ -4556,7 +4566,7 @@ public:
/// \p SignedIndices indicates whether any of the GEP indices are signed.
/// \p IsSubtraction indicates whether the expression used to form the GEP
/// is a subtraction.
- llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
ArrayRef<llvm::Value *> IdxList,
bool SignedIndices,
bool IsSubtraction,
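
The EmitCheckedInBoundsGEP signature above gains an explicit ElemTy for the same opaque-pointer reason: IRBuilder GEPs can no longer derive the element type from the pointer operand. A minimal sketch of the explicit-type form (names illustrative):

    #include "llvm/IR/IRBuilder.h"

    // With opaque pointers the GEP element type must be spelled out; it
    // cannot be recovered from Ptr's pointer type.
    static llvm::Value *gepExample(llvm::IRBuilder<> &B, llvm::Type *ElemTy,
                                   llvm::Value *Ptr, llvm::Value *Idx) {
      return B.CreateInBoundsGEP(ElemTy, Ptr, Idx, "idx");
    }
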
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 9ba1a5c25e81..36b7ce87336c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2832,7 +2832,7 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
// Look for an existing global.
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
ConstantEmitter Emitter(*this);
llvm::Constant *Init;
@@ -2866,15 +2866,15 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
setDSOLocal(GV);
- llvm::Constant *Addr = GV;
if (!V.isAbsent()) {
Emitter.finalize(GV);
- } else {
- llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
- Addr = llvm::ConstantExpr::getBitCast(
- GV, Ty->getPointerTo(GV->getAddressSpace()));
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
- return ConstantAddress(Addr, Alignment);
+
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
+ llvm::Constant *Addr = llvm::ConstantExpr::getBitCast(
+ GV, Ty->getPointerTo(GV->getAddressSpace()));
+ return ConstantAddress(Addr, Ty, Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
@@ -2883,7 +2883,7 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
ConstantEmitter Emitter(*this);
llvm::Constant *Init = Emitter.emitForInitializer(
@@ -2901,7 +2901,7 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
Emitter.finalize(GV);
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -2916,7 +2916,7 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
if (Entry) {
unsigned AS = getContext().getTargetAddressSpace(VD->getType());
auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
- return ConstantAddress(Ptr, Alignment);
+ return ConstantAddress(Ptr, DeclTy, Alignment);
}
llvm::Constant *Aliasee;
@@ -2932,7 +2932,7 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
F->setLinkage(llvm::Function::ExternalWeakLinkage);
WeakRefReferences.insert(F);
- return ConstantAddress(Aliasee, Alignment);
+ return ConstantAddress(Aliasee, DeclTy, Alignment);
}
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
@@ -3886,6 +3886,14 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
return F;
}
+llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
+ llvm::GlobalValue *F =
+ cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
+
+ return llvm::ConstantExpr::getBitCast(llvm::NoCFIValue::get(F),
+ llvm::Type::getInt8PtrTy(VMContext));
+}
+
static const FunctionDecl *
GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
@@ -5228,7 +5236,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
StringLength);
if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+ return ConstantAddress(
+ C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -5409,7 +5418,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
Entry.second = GV;
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
bool CodeGenModule::getExpressionLocationsEnabled() const {
@@ -5527,7 +5536,7 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
GV->setAlignment(Alignment.getAsAlign());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
}
@@ -5557,7 +5566,7 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
QualType());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
@@ -5590,7 +5599,7 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
GV->setAlignment(Alignment.getAsAlign());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
}
@@ -5604,7 +5613,7 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
*Entry = GV;
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
@@ -5634,7 +5643,9 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
nullptr);
}
- return ConstantAddress(InsertResult.first->second, Align);
+ return ConstantAddress(
+ InsertResult.first->second,
+ InsertResult.first->second->getType()->getPointerElementType(), Align);
}
// FIXME: If an externally-visible declaration extends multiple temporaries,
@@ -5725,7 +5736,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
}
Entry = CV;
- return ConstantAddress(CV, Align);
+ return ConstantAddress(CV, Type, Align);
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -6398,6 +6409,11 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
StringRef Suffix) {
+ if (auto *FnType = T->getAs<FunctionProtoType>())
+ T = getContext().getFunctionType(
+ FnType->getReturnType(), FnType->getParamTypes(),
+ FnType->getExtProtoInfo().withExceptionSpec(EST_None));
+
llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
if (InternalId)
return InternalId;
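
The CreateMetadataIdentifierImpl hunk above canonicalizes function types before forming CFI type ids, so declarations differing only in their exception specification produce the same id. In isolation the canonicalization looks roughly like this (a sketch; the helper name is illustrative):

    #include "clang/AST/ASTContext.h"
    #include "clang/AST/Type.h"

    using namespace clang;

    // Drop the exception specification so `void()` and `void() noexcept`
    // map to the same canonical type, and hence the same CFI type id.
    static QualType dropExceptionSpec(ASTContext &Ctx, QualType T) {
      if (const auto *FPT = T->getAs<FunctionProtoType>())
        return Ctx.getFunctionType(
            FPT->getReturnType(), FPT->getParamTypes(),
            FPT->getExtProtoInfo().withExceptionSpec(EST_None));
      return T;
    }
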
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index e1c7f486d334..f1565511f98a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -881,6 +881,9 @@ public:
ForDefinition_t IsForDefinition
= NotForDefinition);
+ // Return the function body address of the given function.
+ llvm::Constant *GetFunctionStart(const ValueDecl *Decl);
+
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
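
GetFunctionStart, declared above, exists so callers can take the address of the actual function body rather than its CFI jump-table entry. A hedged sketch of the core expression (names illustrative):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Type.h"

    // Wrap F in NoCFIValue so the constant refers to the function body
    // even under CFI, then cast to i8* for callers wanting a raw address.
    static llvm::Constant *functionStart(llvm::GlobalValue *F,
                                         llvm::LLVMContext &Ctx) {
      return llvm::ConstantExpr::getBitCast(llvm::NoCFIValue::get(F),
                                            llvm::Type::getInt8PtrTy(Ctx));
    }
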
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
index f4ebe6885675..95763d8e18b7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -209,12 +209,12 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
return createScalarTypeNode(OutName, getChar(), Size);
}
- if (const auto *EIT = dyn_cast<ExtIntType>(Ty)) {
+ if (const auto *EIT = dyn_cast<BitIntType>(Ty)) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
// Don't specify signed/unsigned since integer types can alias despite sign
// differences.
- Out << "_ExtInt(" << EIT->getNumBits() << ')';
+ Out << "_BitInt(" << EIT->getNumBits() << ')';
return createScalarTypeNode(OutName, getChar(), Size);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index fb05475a4e8c..77721510dfd0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -97,10 +97,10 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
llvm::Type *R = ConvertType(T);
- // If this is a bool type, or an ExtIntType in a bitfield representation,
- // map this integer to the target-specified size.
- if ((ForBitField && T->isExtIntType()) ||
- (!T->isExtIntType() && R->isIntegerTy(1)))
+ // If this is a bool type, or a bit-precise integer type in a bitfield
+ // representation, map this integer to the target-specified size.
+ if ((ForBitField && T->isBitIntType()) ||
+ (!T->isBitIntType() && R->isIntegerTy(1)))
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
@@ -786,8 +786,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
break;
}
- case Type::ExtInt: {
- const auto &EIT = cast<ExtIntType>(Ty);
+ case Type::BitInt: {
+ const auto &EIT = cast<BitIntType>(Ty);
ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
break;
}
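
As the ConvertType case above shows, a _BitInt(N) lowers directly to the LLVM integer type iN; only bool and bit-precise bitfields get widened to a target-chosen memory size. A toy illustration (standalone, not patch code):

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"

    static llvm::Type *lowerBitInt(llvm::LLVMContext &Ctx, unsigned NumBits) {
      // _BitInt(37) becomes i37; no rounding up to a power of two here.
      return llvm::Type::getIntNTy(Ctx, NumBits);
    }
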
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 04163aeaddc5..1a15b09c7b2b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1345,7 +1345,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
- CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
+ CGF.EmitAnyExprToExn(
+ E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
// Now throw the exception.
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
@@ -2465,7 +2466,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
- Address guardAddr = Address(guard, guardAlignment);
+ Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
// Test whether the variable has completed initialization.
//
@@ -2880,7 +2881,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
Guard->setAlignment(GuardAlign.getAsAlign());
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
- InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
+ InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
// On Darwin platforms, use CXX_FAST_TLS calling convention.
if (CGM.getTarget().getTriple().isOSDarwin()) {
InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
@@ -3529,7 +3530,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm_unreachable("Pipe types shouldn't get here");
case Type::Builtin:
- case Type::ExtInt:
+ case Type::BitInt:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
@@ -3802,7 +3803,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Pipe:
break;
- case Type::ExtInt:
+ case Type::BitInt:
break;
case Type::ConstantArray:
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 0fd5a0ffe06c..5971a7709304 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -917,7 +917,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
- Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
+ Value = CGF.Builder.CreateElementBitCast(Value, CGF.Int8Ty);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
@@ -2408,14 +2408,14 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
StringRef VarName("_Init_thread_epoch");
CharUnits Align = CGM.getIntAlign();
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
/*isConstant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
GV->setAlignment(Align.getAsAlign());
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
}
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
@@ -2567,7 +2567,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GI->Guard = GuardVar;
}
- ConstantAddress GuardAddr(GuardVar, GuardAlign);
+ ConstantAddress GuardAddr(GuardVar, GuardTy, GuardAlign);
assert(GuardVar->getLinkage() == GV->getLinkage() &&
"static local from the same function had different linkage");
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index 36e0319c8ab9..85089cdb2200 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -104,7 +104,7 @@ bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
if (Ty->isPromotableIntegerType())
return true;
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
return true;
@@ -431,7 +431,7 @@ static Address emitMergePHI(CodeGenFunction &CGF,
PHI->addIncoming(Addr1.getPointer(), Block1);
PHI->addIncoming(Addr2.getPointer(), Block2);
CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
- return Address(PHI, Align);
+ return Address(PHI, Addr1.getElementType(), Align);
}
TargetCodeGenInfo::~TargetCodeGenInfo() = default;
@@ -762,7 +762,7 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
Ty = EnumTy->getDecl()->getIntegerType();
ASTContext &Context = getContext();
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() >
Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
? Context.Int128Ty
@@ -784,7 +784,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() >
getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
? getContext().Int128Ty
@@ -1008,8 +1008,9 @@ ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
} else if (Ty->isFloatingType()) {
// Floating-point types don't go inreg.
return ABIArgInfo::getDirect();
- } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
- // Treat extended integers as integers if <=64, otherwise pass indirectly.
+ } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ // Treat bit-precise integers as integers if <= 64, otherwise pass
+ // indirectly.
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(Ty);
return ABIArgInfo::getDirect();
@@ -1027,8 +1028,8 @@ ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
if (isAggregateTypeForABI(RetTy))
return getNaturalAlignIndirect(RetTy);
- // Treat extended integers as integers if <=64, otherwise pass indirectly.
- if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
+ // Treat bit-precise integers as integers if <= 64, otherwise pass indirectly.
+ if (const auto *EIT = RetTy->getAs<BitIntType>()) {
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(RetTy);
return ABIArgInfo::getDirect();
@@ -1590,7 +1591,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getIndirectReturnResult(RetTy, State);
@@ -1926,7 +1927,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
return ABIArgInfo::getExtend(Ty);
}
- if (const auto * EIT = Ty->getAs<ExtIntType>()) {
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
if (EIT->getNumBits() <= 64) {
if (InReg)
return ABIArgInfo::getDirectInReg();
@@ -3009,7 +3010,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
return;
}
- if (const auto *EITy = Ty->getAs<ExtIntType>()) {
+ if (const auto *EITy = Ty->getAs<BitIntType>()) {
if (EITy->getNumBits() <= 64)
Current = Integer;
else if (EITy->getNumBits() <= 128)
@@ -3200,7 +3201,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (Ty->isExtIntType())
+ if (Ty->isBitIntType())
return getNaturalAlignIndirect(Ty);
return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
@@ -3237,7 +3238,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
// but this code would be much safer if we could mark the argument with
// 'onstack'. See PR12193.
if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
- !Ty->isExtIntType()) {
+ !Ty->isBitIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
@@ -4033,7 +4034,7 @@ static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
// AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Address(Res, Align);
+ return Address(Res, LTy, Align);
}
Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -4146,7 +4147,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
} else if (neededInt) {
RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
- CharUnits::fromQuantity(8));
+ CGF.Int8Ty, CharUnits::fromQuantity(8));
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
// Copy to a temporary if necessary to ensure the appropriate alignment.
@@ -4164,7 +4165,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
} else if (neededSSE == 1) {
RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
- CharUnits::fromQuantity(16));
+ CGF.Int8Ty, CharUnits::fromQuantity(16));
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
} else {
assert(neededSSE == 2 && "Invalid number of needed registers!");
@@ -4176,7 +4177,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// all the SSE registers to the RSA.
Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
fp_offset),
- CharUnits::fromQuantity(16));
+ CGF.Int8Ty, CharUnits::fromQuantity(16));
Address RegAddrHi =
CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
CharUnits::fromQuantity(16));
@@ -4357,12 +4358,12 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
}
}
- if (Ty->isExtIntType()) {
+ if (Ty->isBitIntType()) {
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
- // However, non-power-of-two _ExtInts will be passed as 1,2,4 or 8 bytes
- // anyway as long is it fits in them, so we don't have to check the power of
- // 2.
+ // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
+ // or 8 bytes anyway as long as it fits in them, so we don't have to check
+ // the power of 2.
if (Width <= 64)
return ABIArgInfo::getDirect();
return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
@@ -5069,7 +5070,7 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
break;
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < 64)
return true;
@@ -5083,13 +5084,16 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
if (const ComplexType *CTy = Ty->getAs<ComplexType>())
Ty = CTy->getElementType();
+ auto FloatUsesVector = [this](QualType Ty){
+ return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
+ Ty) == &llvm::APFloat::IEEEquad();
+ };
+
// Only vector types of size 16 bytes need alignment (larger types are
// passed via reference, smaller types are not aligned).
if (Ty->isVectorType()) {
return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
- } else if (Ty->isRealFloatingType() &&
- &getContext().getFloatTypeSemantics(Ty) ==
- &llvm::APFloat::IEEEquad()) {
+ } else if (FloatUsesVector(Ty)) {
// According to ABI document section 'Optional Save Areas': If extended
// precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
// format are supported, map them to a single quadword, quadword aligned.
@@ -5116,7 +5120,9 @@ CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
// With special case aggregates, only vector base types need alignment.
if (AlignAsType) {
- return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
+ bool UsesVector = AlignAsType->isVectorType() ||
+ FloatUsesVector(QualType(AlignAsType, 0));
+ return CharUnits::fromQuantity(UsesVector ? 16 : 8);
}
// Otherwise, we only need alignment for any aggregate type that
@@ -5289,7 +5295,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
}
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
@@ -5365,7 +5371,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
}
}
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
@@ -5717,7 +5723,7 @@ AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
Ty = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(Ty);
@@ -5819,7 +5825,7 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
return getNaturalAlignIndirect(RetTy);
@@ -6561,7 +6567,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
Ty = EnumTy->getDecl()->getIntegerType();
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
@@ -6763,7 +6769,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
@@ -7100,7 +7106,7 @@ bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
(T->isFloat128Type() ||
(T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
return true;
- if (const auto *EIT = T->getAs<ExtIntType>())
+ if (const auto *EIT = T->getAs<BitIntType>())
return EIT->getNumBits() >
(Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
@@ -7177,7 +7183,7 @@ ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
return getNaturalAlignIndirect(Ty, /* byval */ true);
}
- if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
if ((EIT->getNumBits() > 128) ||
(!getContext().getTargetInfo().hasInt128Type() &&
EIT->getNumBits() > 64))
@@ -7391,7 +7397,7 @@ bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
return true;
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < 64)
return true;
@@ -7994,7 +8000,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
Ty = EnumTy->getDecl()->getIntegerType();
// Make sure we pass indirectly things that are too large.
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128 ||
(EIT->getNumBits() > 64 &&
!getContext().getTargetInfo().hasInt128Type()))
@@ -8085,7 +8091,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
RetTy = EnumTy->getDecl()->getIntegerType();
// Make sure we pass indirectly things that are too large.
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128 ||
(EIT->getNumBits() > 64 &&
!getContext().getTargetInfo().hasInt128Type()))
@@ -8460,7 +8466,7 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
if (Size <= 64)
HexagonAdjustRegsLeft(Size, RegsLeft);
- if (Size > 64 && Ty->isExtIntType())
+ if (Size > 64 && Ty->isBitIntType())
return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
@@ -8516,7 +8522,7 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
RetTy = EnumTy->getDecl()->getIntegerType();
- if (Size > 64 && RetTy->isExtIntType())
+ if (Size > 64 && RetTy->isBitIntType())
return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
@@ -8887,7 +8893,7 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
bool InReg = shouldUseInReg(Ty, State);
// Don't pass >64 bit integers in registers.
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getIndirectResult(Ty, /*ByVal=*/true, State);
@@ -9161,6 +9167,10 @@ class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
: TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
+
+ void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
+ CodeGenModule &CGM) const;
+
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
@@ -9200,36 +9210,13 @@ static bool requiresAMDGPUProtectedVisibility(const Decl *D,
cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
}
-void AMDGPUTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (requiresAMDGPUProtectedVisibility(D, GV)) {
- GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
- GV->setDSOLocal(true);
- }
-
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- const auto *ReqdWGS = M.getLangOpts().OpenCL ?
- FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
-
-
- const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
- FD->hasAttr<OpenCLKernelAttr>();
- const bool IsHIPKernel = M.getLangOpts().HIP &&
- FD->hasAttr<CUDAGlobalAttr>();
- if ((IsOpenCLKernel || IsHIPKernel) &&
- (M.getTriple().getOS() == llvm::Triple::AMDHSA))
- F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
-
- if (IsHIPKernel)
- F->addFnAttr("uniform-work-group-size", "true");
-
+void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
+ const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
+ const auto *ReqdWGS =
+ M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
+ const bool IsOpenCLKernel =
+ M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
+ const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();
const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
if (ReqdWGS || FlatWGS) {
@@ -9297,6 +9284,38 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
if (NumVGPR != 0)
F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
}
+}
+
+void AMDGPUTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (requiresAMDGPUProtectedVisibility(D, GV)) {
+ GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ GV->setDSOLocal(true);
+ }
+
+ if (GV->isDeclaration())
+ return;
+
+ llvm::Function *F = dyn_cast<llvm::Function>(GV);
+ if (!F)
+ return;
+
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD)
+ setFunctionDeclAttributes(FD, F, M);
+
+ const bool IsOpenCLKernel =
+ M.getLangOpts().OpenCL && FD && FD->hasAttr<OpenCLKernelAttr>();
+ const bool IsHIPKernel =
+ M.getLangOpts().HIP && FD && FD->hasAttr<CUDAGlobalAttr>();
+
+ const bool IsOpenMP = M.getLangOpts().OpenMP && !FD;
+ if ((IsOpenCLKernel || IsHIPKernel || IsOpenMP) &&
+ (M.getTriple().getOS() == llvm::Triple::AMDHSA))
+ F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
+
+ if (IsHIPKernel)
+ F->addFnAttr("uniform-work-group-size", "true");
if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
@@ -9343,7 +9362,9 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
if (AddrSpace != LangAS::Default)
return AddrSpace;
- if (CGM.isTypeConstant(D->getType(), false)) {
+ // Only promote to address space 4 if the VarDecl has constant initialization.
+ if (CGM.isTypeConstant(D->getType(), false) &&
+ D->hasConstantInitialization()) {
if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
return ConstAS.getValue();
}
@@ -9606,7 +9627,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
if (Size < 64 && Ty->isIntegerType())
return ABIArgInfo::getExtend(Ty);
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() < 64)
return ABIArgInfo::getExtend(Ty);
@@ -9860,7 +9881,7 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
ABIArgInfo::getDirect(Result, 0, nullptr, false);
}
- if (const auto *EIT = Ty->getAs<ExtIntType>())
+ if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
return getIndirectByValue(Ty);
@@ -10209,12 +10230,23 @@ public:
private:
void setCCs();
};
+
+class SPIRVABIInfo : public CommonSPIRABIInfo {
+public:
+ SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+private:
+ ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
+};
} // end anonymous namespace
namespace {
class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
+ CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
+ : TargetCodeGenInfo(std::move(ABIInfo)) {}
LangAS getASTAllocaAddressSpace() const override {
return getLangASFromTargetAS(
@@ -10223,18 +10255,60 @@ public:
unsigned getOpenCLKernelCallingConv() const override;
};
-
+class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
+public:
+ SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
+ void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+};
} // End anonymous namespace.
+
void CommonSPIRABIInfo::setCCs() {
assert(getRuntimeCC() == llvm::CallingConv::C);
RuntimeCC = llvm::CallingConv::SPIR_FUNC;
}
+ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
+ if (getContext().getLangOpts().HIP) {
+ // Coerce pointer arguments in the default address space to CrossWorkGroup
+ // pointers for HIPSPV. When the language mode is HIP, the SPIRTargetInfo
+ // maps cuda_device to SPIR-V's CrossWorkGroup address space.
+ llvm::Type *LTy = CGT.ConvertType(Ty);
+ auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
+ auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
+ LTy = llvm::PointerType::get(
+ cast<llvm::PointerType>(LTy)->getElementType(), GlobalAS);
+ return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+ }
+ }
+ return classifyArgumentType(Ty);
+}
+
+void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ // The logic is the same as in DefaultABIInfo, except for the handling of
+ // kernel arguments.
+ llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments()) {
+ if (CC == llvm::CallingConv::SPIR_KERNEL) {
+ I.info = classifyKernelArgumentType(I.type);
+ } else {
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+}
+
namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
- DefaultABIInfo SPIRABI(CGM.getTypes());
- SPIRABI.computeInfo(FI);
+ if (CGM.getTarget().getTriple().isSPIRV())
+ SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
+ else
+ CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
}
}
}
@@ -10243,6 +10317,16 @@ unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
return llvm::CallingConv::SPIR_KERNEL;
}
+void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
+ const FunctionType *&FT) const {
+ // Convert HIP kernels to SPIR-V kernels.
+ if (getABIInfo().getContext().getLangOpts().HIP) {
+ FT = getABIInfo().getContext().adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+ return;
+ }
+}
+
static bool appendType(SmallStringEnc &Enc, QualType QType,
const CodeGen::CodeGenModule &CGM,
TypeStringCache &TSC);
@@ -10943,7 +11027,7 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
return extendType(Ty);
}
- if (const auto *EIT = Ty->getAs<ExtIntType>()) {
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
if (EIT->getNumBits() < XLen && !MustUseStack)
return extendType(Ty);
if (EIT->getNumBits() > 128 ||
@@ -11308,9 +11392,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new ARCTargetCodeGenInfo(Types));
case llvm::Triple::spir:
case llvm::Triple::spir64:
+ return SetCGInfo(new CommonSPIRTargetCodeGenInfo(Types));
case llvm::Triple::spirv32:
case llvm::Triple::spirv64:
- return SetCGInfo(new CommonSPIRTargetCodeGenInfo(Types));
+ return SetCGInfo(new SPIRVTargetCodeGenInfo(Types));
case llvm::Triple::ve:
return SetCGInfo(new VETargetCodeGenInfo(Types));
}
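
The new SPIRVABIInfo above mainly exists for classifyKernelArgumentType; the pointer rewrite it performs can be sketched in isolation as follows (assumes LLVM 14-style typed pointers, since it reads the pointee type; names illustrative):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/Support/Casting.h"

    // Retarget a pointer in the default address space to the
    // CrossWorkGroup (global) address space, preserving the pointee type.
    static llvm::Type *coerceToGlobalAS(llvm::Type *LTy, unsigned DefaultAS,
                                        unsigned GlobalAS) {
      if (auto *PT = llvm::dyn_cast<llvm::PointerType>(LTy))
        if (PT->getAddressSpace() == DefaultAS)
          return llvm::PointerType::get(PT->getElementType(), GlobalAS);
      return LTy;
    }
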
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index d501bd026219..3b551ea94cc2 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -23,7 +23,8 @@
#include "ToolChains/FreeBSD.h"
#include "ToolChains/Fuchsia.h"
#include "ToolChains/Gnu.h"
-#include "ToolChains/HIP.h"
+#include "ToolChains/HIPAMD.h"
+#include "ToolChains/HIPSPV.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
#include "ToolChains/Hurd.h"
@@ -42,6 +43,7 @@
#include "ToolChains/PPCLinux.h"
#include "ToolChains/PS4CPU.h"
#include "ToolChains/RISCVToolchain.h"
+#include "ToolChains/SPIRV.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
#include "ToolChains/VEToolchain.h"
@@ -99,8 +101,39 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
-static llvm::Triple getHIPOffloadTargetTriple() {
- static const llvm::Triple T("amdgcn-amd-amdhsa");
+static llvm::Optional<llvm::Triple>
+getHIPOffloadTargetTriple(const Driver &D, const ArgList &Args) {
+ if (Args.hasArg(options::OPT_offload_EQ)) {
+ auto HIPOffloadTargets = Args.getAllArgValues(options::OPT_offload_EQ);
+
+ // The HIP compilation flow does not support multiple targets for now. We
+ // need the HIPActionBuilder (and possibly the CudaActionBuilder{,Base} too)
+ // to support multiple tool chains first.
+ switch (HIPOffloadTargets.size()) {
+ default:
+ D.Diag(diag::err_drv_only_one_offload_target_supported_in) << "HIP";
+ return llvm::None;
+ case 0:
+ D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << "";
+ return llvm::None;
+ case 1:
+ break;
+ }
+ llvm::Triple TT(HIPOffloadTargets[0]);
+ if (TT.getArch() == llvm::Triple::amdgcn &&
+ TT.getVendor() == llvm::Triple::AMD &&
+ TT.getOS() == llvm::Triple::AMDHSA)
+ return TT;
+ if (TT.getArch() == llvm::Triple::spirv64 &&
+ TT.getVendor() == llvm::Triple::UnknownVendor &&
+ TT.getOS() == llvm::Triple::UnknownOS)
+ return TT;
+ D.Diag(diag::err_drv_invalid_or_unsupported_offload_target)
+ << HIPOffloadTargets[0];
+ return llvm::None;
+ }
+
+ static const llvm::Triple T("amdgcn-amd-amdhsa"); // Default HIP triple.
return T;
}
@@ -694,17 +727,14 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
return;
}
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
- const llvm::Triple &HostTriple = HostTC->getTriple();
auto OFK = Action::OFK_HIP;
- llvm::Triple HIPTriple = getHIPOffloadTargetTriple();
- // Use the HIP and host triples as the key into the ToolChains map,
- // because the device toolchain we create depends on both.
- auto &HIPTC = ToolChains[HIPTriple.str() + "/" + HostTriple.str()];
- if (!HIPTC) {
- HIPTC = std::make_unique<toolchains::HIPToolChain>(
- *this, HIPTriple, *HostTC, C.getInputArgs());
- }
- C.addOffloadDeviceToolChain(HIPTC.get(), OFK);
+ auto HIPTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs());
+ if (!HIPTriple)
+ return;
+ auto *HIPTC = &getOffloadingDeviceToolChain(C.getInputArgs(), *HIPTriple,
+ *HostTC, OFK);
+ assert(HIPTC && "Could not create offloading device tool chain.");
+ C.addOffloadDeviceToolChain(HIPTC, OFK);
}
//
@@ -1545,7 +1575,7 @@ int Driver::ExecuteCompilation(
if (Diags.hasErrorOccurred())
return 1;
- // Set up response file names for each command, if necessary
+ // Set up response file names for each command, if necessary.
for (auto &Job : C.getJobs())
setUpResponseFiles(C, Job);
@@ -2040,11 +2070,7 @@ static bool ContainsCompileOrAssembleAction(const Action *A) {
isa<AssembleJobAction>(A))
return true;
- for (const Action *Input : A->inputs())
- if (ContainsCompileOrAssembleAction(Input))
- return true;
-
- return false;
+ return llvm::any_of(A->inputs(), ContainsCompileOrAssembleAction);
}
void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
@@ -2733,6 +2759,14 @@ class OffloadingActionBuilder final {
}
}
+ // --offload and --offload-arch options are mutually exclusive.
+ if (Args.hasArgNoClaim(options::OPT_offload_EQ) &&
+ Args.hasArgNoClaim(options::OPT_offload_arch_EQ,
+ options::OPT_no_offload_arch_EQ)) {
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt) << "--offload-arch"
+ << "--offload";
+ }
+
// Collect all cuda_gpu_arch parameters, removing duplicates.
std::set<StringRef> GpuArchs;
bool Error = false;
@@ -2775,8 +2809,12 @@ class OffloadingActionBuilder final {
// Default to sm_20 which is the lowest common denominator for
// supported GPUs. sm_20 code should work correctly, if
// suboptimally, on all newer GPUs.
- if (GpuArchList.empty())
- GpuArchList.push_back(DefaultCudaArch);
+ if (GpuArchList.empty()) {
+ if (ToolChains.front()->getTriple().isSPIRV())
+ GpuArchList.push_back(CudaArch::Generic);
+ else
+ GpuArchList.push_back(DefaultCudaArch);
+ }
return Error;
}
@@ -2937,8 +2975,11 @@ class OffloadingActionBuilder final {
StringRef getCanonicalOffloadArch(StringRef IdStr) override {
llvm::StringMap<bool> Features;
- auto ArchStr =
- parseTargetID(getHIPOffloadTargetTriple(), IdStr, &Features);
+ // getHIPOffloadTargetTriple() is known to return a valid value, as it has
+ // already been called successfully in CreateOffloadingDeviceToolChains().
+ auto ArchStr = parseTargetID(
+ *getHIPOffloadTargetTriple(C.getDriver(), C.getInputArgs()), IdStr,
+ &Features);
if (!ArchStr) {
C.getDriver().Diag(clang::diag::err_drv_bad_target_id) << IdStr;
C.setContainsError();
@@ -2992,9 +3033,19 @@ class OffloadingActionBuilder final {
// When LTO is not enabled, we follow the conventional
// compiler phases, including backend and assemble phases.
ActionList AL;
- auto BackendAction = C.getDriver().ConstructPhaseAction(
- C, Args, phases::Backend, CudaDeviceActions[I],
- AssociatedOffloadKind);
+ Action *BackendAction = nullptr;
+ if (ToolChains.front()->getTriple().isSPIRV()) {
+ // Emit LLVM bitcode for SPIR-V targets. The SPIR-V device tool chain
+ // (HIPSPVToolChain) runs post-link LLVM IR passes.
+ types::ID Output = Args.hasArg(options::OPT_S)
+ ? types::TY_LLVM_IR
+ : types::TY_LLVM_BC;
+ BackendAction =
+ C.MakeAction<BackendJobAction>(CudaDeviceActions[I], Output);
+ } else
+ BackendAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Backend, CudaDeviceActions[I],
+ AssociatedOffloadKind);
auto AssembleAction = C.getDriver().ConstructPhaseAction(
C, Args, phases::Assemble, BackendAction,
AssociatedOffloadKind);
@@ -3724,6 +3775,14 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
}
+ // FIXME: Linking separate translation units for SPIR-V is not supported yet.
+ // It could be done either by linking the LLVM IR before the final module is
+ // converted to SPIR-V, or by using an external SPIR-V linker such as
+ // spirv-link.
+ if (C.getDefaultToolChain().getTriple().isSPIRV() && Inputs.size() > 1) {
+ Diag(clang::diag::warn_drv_spirv_linking_multiple_inputs_unsupported);
+ }
+
handleArguments(C, Args, Inputs, Actions);
// Builder to be used to build offloading actions.
@@ -3763,8 +3822,15 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Queue linker inputs.
if (Phase == phases::Link) {
assert(Phase == PL.back() && "linking must be final compilation step.");
- LinkerInputs.push_back(Current);
- Current = nullptr;
+      // Compilation phases are set up per language; however, for SPIR-V the
+      // final linking phase is meaningless, since the compilation phase
+      // already produces the final binary.
+ // FIXME: OpenCL - we could strip linking phase out from OpenCL
+ // compilation phases if we could verify it is not needed by any target.
+ if (!C.getDefaultToolChain().getTriple().isSPIRV()) {
+ LinkerInputs.push_back(Current);
+ Current = nullptr;
+ }
break;
}
@@ -4337,6 +4403,12 @@ class ToolSelector final {
if (!T)
return nullptr;
+ // Can't collapse if we don't have codegen support unless we are
+ // emitting LLVM IR.
+ bool OutputIsLLVM = types::isLLVMIR(ActionInfo[0].JA->getType());
+ if (!T->hasIntegratedBackend() && !(OutputIsLLVM && T->canEmitIR()))
+ return nullptr;
+
// When using -fembed-bitcode, it is required to have the same tool (clang)
// for both CompilerJA and BackendJA. Otherwise, combine two stages.
if (EmbedBitcode) {
@@ -4406,6 +4478,12 @@ class ToolSelector final {
if (!T)
return nullptr;
+ // Can't collapse if we don't have codegen support unless we are
+ // emitting LLVM IR.
+ bool OutputIsLLVM = types::isLLVMIR(ActionInfo[0].JA->getType());
+ if (!T->hasIntegratedBackend() && !(OutputIsLLVM && T->canEmitIR()))
+ return nullptr;
+
if (T->canEmitIR() && ((SaveTemps && !InputIsBitcode) || EmbedBitcode))
return nullptr;
@@ -5429,6 +5507,10 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::ve:
TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
break;
+ case llvm::Triple::spirv32:
+ case llvm::Triple::spirv64:
+ TC = std::make_unique<toolchains::SPIRVToolChain>(*this, Target, Args);
+ break;
default:
if (Target.getVendor() == llvm::Triple::Myriad)
TC = std::make_unique<toolchains::MyriadToolChain>(*this, Target,
@@ -5453,6 +5535,38 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
return *TC;
}
+const ToolChain &Driver::getOffloadingDeviceToolChain(
+ const ArgList &Args, const llvm::Triple &Target, const ToolChain &HostTC,
+ const Action::OffloadKind &TargetDeviceOffloadKind) const {
+ // Use device / host triples as the key into the ToolChains map because the
+ // device ToolChain we create depends on both.
+ auto &TC = ToolChains[Target.str() + "/" + HostTC.getTriple().str()];
+ if (!TC) {
+    // Categorized by offload kind > arch rather than OS > arch as in the
+    // normal getToolChain call, since that seems a reasonable way to
+    // categorize device toolchains.
+ switch (TargetDeviceOffloadKind) {
+ case Action::OFK_HIP: {
+ if (Target.getArch() == llvm::Triple::amdgcn &&
+ Target.getVendor() == llvm::Triple::AMD &&
+ Target.getOS() == llvm::Triple::AMDHSA)
+ TC = std::make_unique<toolchains::HIPAMDToolChain>(*this, Target,
+ HostTC, Args);
+ else if (Target.getArch() == llvm::Triple::spirv64 &&
+ Target.getVendor() == llvm::Triple::UnknownVendor &&
+ Target.getOS() == llvm::Triple::UnknownOS)
+ TC = std::make_unique<toolchains::HIPSPVToolChain>(*this, Target,
+ HostTC, Args);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ return *TC;
+}
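
[Editor's note] A minimal sketch of the caching scheme introduced above, with simplified stand-in types: one device toolchain is cached per (device triple, host triple) pair, because the same device triple may be paired with different hosts in a single build.

#include <map>
#include <memory>
#include <string>

struct DeviceToolChain {
  std::string Device, Host;
};

static std::map<std::string, std::unique_ptr<DeviceToolChain>> Cache;

static DeviceToolChain &getDeviceToolChain(const std::string &Device,
                                           const std::string &Host) {
  // Composite "device/host" key, mirroring the map lookup in the hunk above.
  auto &TC = Cache[Device + "/" + Host];
  if (!TC)
    TC = std::make_unique<DeviceToolChain>(DeviceToolChain{Device, Host});
  return *TC;
}
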
+
bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
// Say "no" if there is not exactly one input of a type clang understands.
if (JA.size() != 1 ||
diff --git a/contrib/llvm-project/clang/lib/Driver/Job.cpp b/contrib/llvm-project/clang/lib/Driver/Job.cpp
index 5b87106b6565..f63763effaff 100644
--- a/contrib/llvm-project/clang/lib/Driver/Job.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Job.cpp
@@ -388,6 +388,8 @@ int CC1Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
Argv.push_back(getExecutable());
Argv.append(getArguments().begin(), getArguments().end());
Argv.push_back(nullptr);
+ Argv.pop_back(); // The terminating null element shall not be part of the
+ // slice (main() behavior).
// This flag simply indicates that the program couldn't start, which isn't
// applicable here.
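
[Editor's note] The two added lines above keep a null terminator in the underlying buffer while excluding it from the advertised argument count, matching main()'s argv convention. A self-contained sketch of that convention (all names here are illustrative, not the cc1 entry point):

#include <cstdio>
#include <vector>

// Hypothetical main()-style consumer: argc excludes the terminator.
static int mainStyle(int Argc, const char **Argv) {
  for (int I = 0; I < Argc; ++I)
    std::printf("%s\n", Argv[I]);
  return 0;
}

int main() {
  std::vector<const char *> Argv = {"clang", "-cc1", "-version"};
  Argv.push_back(nullptr); // keep a terminator in the underlying buffer
  Argv.pop_back();         // ...but exclude it from the advertised argc
  return mainStyle(static_cast<int>(Argv.size()), Argv.data());
}
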
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index 6588cdf9fecd..50c89aaadc18 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -260,7 +260,7 @@ bool ToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
Tool *ToolChain::getClang() const {
if (!Clang)
- Clang.reset(new tools::Clang(*this));
+ Clang.reset(new tools::Clang(*this, useIntegratedBackend()));
return Clang.get();
}
@@ -541,12 +541,9 @@ std::string ToolChain::GetProgramPath(const char *Name) const {
return D.GetProgramPath(Name, *this);
}
-std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
- bool *LinkerIsLLDDarwinNew) const {
+std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD) const {
if (LinkerIsLLD)
*LinkerIsLLD = false;
- if (LinkerIsLLDDarwinNew)
- *LinkerIsLLDDarwinNew = false;
// Get -fuse-ld= first to prevent -Wunused-command-line-argument. -fuse-ld= is
// considered as the linker flavor, e.g. "bfd", "gold", or "lld".
@@ -599,11 +596,8 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
if (llvm::sys::fs::can_execute(LinkerPath)) {
- // FIXME: Remove LinkerIsLLDDarwinNew once there's only one MachO lld.
if (LinkerIsLLD)
- *LinkerIsLLD = UseLinker == "lld" || UseLinker == "lld.darwinold";
- if (LinkerIsLLDDarwinNew)
- *LinkerIsLLDDarwinNew = UseLinker == "lld";
+ *LinkerIsLLD = UseLinker == "lld";
return LinkerPath;
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
index b5eaf1adca6b..43ce33750eba 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -478,7 +478,8 @@ void RocmInstallationDetector::print(raw_ostream &OS) const {
void RocmInstallationDetector::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- bool UsesRuntimeWrapper = VersionMajorMinor > llvm::VersionTuple(3, 5);
+ bool UsesRuntimeWrapper = VersionMajorMinor > llvm::VersionTuple(3, 5) &&
+ !DriverArgs.hasArg(options::OPT_nohipwrapperinc);
if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
// HIP header includes standard library wrapper headers under clang
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index 863e2c597d53..f282f04b7931 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -267,7 +267,7 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
std::string BitcodeSuffix;
if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime, false))
+ options::OPT_fno_openmp_target_new_runtime, true))
BitcodeSuffix = "new-amdgpu-" + GPUArch;
else
BitcodeSuffix = "amdgcn-" + GPUArch;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index abc32f22d2a1..be13d6d583ce 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -568,4 +568,15 @@ fp16_fml_fallthrough:
if (Args.hasArg(options::OPT_mno_neg_immediates))
Features.push_back("+no-neg-immediates");
+
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
+ options::OPT_mno_fix_cortex_a53_835769)) {
+ if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
+ Features.push_back("+fix-cortex-a53-835769");
+ else
+ Features.push_back("-fix-cortex-a53-835769");
+ } else if (Triple.isAndroid()) {
+    // Enable the A53 erratum (835769) workaround by default on Android.
+ Features.push_back("+fix-cortex-a53-835769");
+ }
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 21c091e1a0ba..4013cf230026 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -147,19 +147,27 @@ bool arm::useAAPCSForMachO(const llvm::Triple &T) {
T.getOS() == llvm::Triple::UnknownOS || isARMMProfile(T);
}
+// We follow GCC and support -mtp=cp15 when the backend supports the MRC/MCR
+// instructions that are used to set the hard thread pointer ("CP15 C13
+// Thread id").
+bool arm::isHardTPSupported(const llvm::Triple &Triple) {
+ int Ver = getARMSubArchVersionNumber(Triple);
+ llvm::ARM::ArchKind AK = llvm::ARM::parseArch(Triple.getArchName());
+ return Triple.isARM() || AK == llvm::ARM::ArchKind::ARMV6T2 ||
+ (Ver >= 7 && AK != llvm::ARM::ArchKind::ARMV8MBaseline);
+}
+
// Select mode for reading thread pointer (-mtp=soft/cp15).
arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args,
- const llvm::Triple &Triple) {
+ const llvm::Triple &Triple, bool ForAS) {
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
arm::ReadTPMode ThreadPointer =
llvm::StringSwitch<arm::ReadTPMode>(A->getValue())
.Case("cp15", ReadTPMode::Cp15)
.Case("soft", ReadTPMode::Soft)
.Default(ReadTPMode::Invalid);
- if (ThreadPointer == ReadTPMode::Cp15 &&
- getARMSubArchVersionNumber(Triple) < 7 &&
- llvm::ARM::parseArch(Triple.getArchName()) !=
- llvm::ARM::ArchKind::ARMV6T2) {
+ if (ThreadPointer == ReadTPMode::Cp15 && !isHardTPSupported(Triple) &&
+ !ForAS) {
D.Diag(diag::err_target_unsupported_tp_hard) << Triple.getArchName();
return ReadTPMode::Invalid;
}
@@ -430,7 +438,6 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
arm::FloatABI ABI = arm::getARMFloatABI(D, Triple, Args);
- arm::ReadTPMode ThreadPointer = arm::getReadTPMode(D, Args, Triple);
llvm::Optional<std::pair<const Arg *, StringRef>> WaCPU, WaFPU, WaHDiv,
WaArch;
@@ -482,7 +489,7 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}
}
- if (ThreadPointer == arm::ReadTPMode::Cp15)
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::Cp15)
Features.push_back("+read-tp-hard");
const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
@@ -869,6 +876,8 @@ fp16_fml_fallthrough:
}
}
+ if (Args.getLastArg(options::OPT_mno_bti_at_return_twice))
+ Features.push_back("+no-bti-at-return-twice");
}
std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
index b6fd68fbb9c6..881b63bd36b9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -53,8 +53,9 @@ FloatABI getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
void setFloatABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
llvm::Triple &triple);
+bool isHardTPSupported(const llvm::Triple &Triple);
ReadTPMode getReadTPMode(const Driver &D, const llvm::opt::ArgList &Args,
- const llvm::Triple &Triple);
+ const llvm::Triple &Triple, bool ForAS);
void setArchNameInTriple(const Driver &D, const llvm::opt::ArgList &Args,
types::ID InputType, llvm::Triple &Triple);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 62e8d3fa2a93..94b179636e4f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -195,27 +195,11 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
auto ParseResult = llvm::RISCVISAInfo::parseArchString(
Arch, /* EnableExperimentalExtension */ true);
- if (!ParseResult) {
+ if (!ParseResult)
    // Ignore the parsing error and just go on to the 3rd step.
consumeError(ParseResult.takeError());
- } else {
- auto &ISAInfo = *ParseResult;
- bool HasD = ISAInfo->hasExtension("d");
- unsigned XLen = ISAInfo->getXLen();
- if (XLen == 32) {
- bool HasE = ISAInfo->hasExtension("e");
- if (HasD)
- return "ilp32d";
- if (HasE)
- return "ilp32e";
- return "ilp32";
- } else if (XLen == 64) {
- if (HasD)
- return "lp64d";
- return "lp64";
- }
- llvm_unreachable("unhandled XLen");
- }
+ else
+ return llvm::RISCV::computeDefaultABIFromArch(**ParseResult);
// 3. Choose a default based on the triple
//
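
[Editor's note] The removed block above is the logic that llvm::RISCV::computeDefaultABIFromArch now centralizes. Reconstructed from that block as a standalone sketch: the default ABI follows the XLEN and the presence of the D (double-precision float) and E (embedded) extensions.

#include <string>

static std::string defaultRISCVABI(unsigned XLen, bool HasD, bool HasE) {
  if (XLen == 32) {
    if (HasD)
      return "ilp32d";
    if (HasE)
      return "ilp32e";
    return "ilp32";
  }
  // XLen == 64: the E extension is RV32-only.
  return HasD ? "lp64d" : "lp64";
}
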
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index c5aaa067c4f5..65347a38490e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -625,8 +625,9 @@ getFramePointerKind(const ArgList &Args, const llvm::Triple &Triple) {
}
/// Add a CC1 option to specify the debug compilation directory.
-static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs,
- const llvm::vfs::FileSystem &VFS) {
+static const char *addDebugCompDirArg(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const llvm::vfs::FileSystem &VFS) {
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
options::OPT_fdebug_compilation_dir_EQ)) {
if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
@@ -638,6 +639,31 @@ static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs,
VFS.getCurrentWorkingDirectory()) {
CmdArgs.push_back(Args.MakeArgString("-fdebug-compilation-dir=" + *CWD));
}
+ StringRef Path(CmdArgs.back());
+ return Path.substr(Path.find('=') + 1).data();
+}
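
[Editor's note] The return statement above hands back a pointer into the just-pushed, NUL-terminated argument string; because the substring runs to the end of that string, a suffix of it is itself a valid C string, so calling .data() on it is safe. This sketch assumes, as the driver does, that the argument string outlives the caller and that a -fdebug-compilation-dir argument was in fact appended:

#include <cstring>
#include <string>

// Return the value part of a "-flag=value" argument without copying:
// the result points into Arg's own NUL-terminated buffer.
static const char *valueOfArg(const std::string &Arg) {
  const char *Eq = std::strchr(Arg.c_str(), '=');
  return Eq ? Eq + 1 : Arg.c_str(); // still NUL-terminated: it is a suffix
}
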
+
+static void addDebugObjectName(const ArgList &Args, ArgStringList &CmdArgs,
+ const char *DebugCompilationDir,
+ const char *OutputFileName) {
+ // No need to generate a value for -object-file-name if it was provided.
+ for (auto *Arg : Args.filtered(options::OPT_Xclang))
+ if (StringRef(Arg->getValue()).startswith("-object-file-name"))
+ return;
+
+ if (Args.hasArg(options::OPT_object_file_name_EQ))
+ return;
+
+ SmallString<128> ObjFileNameForDebug(OutputFileName);
+ if (ObjFileNameForDebug != "-" &&
+ !llvm::sys::path::is_absolute(ObjFileNameForDebug) &&
+ (!DebugCompilationDir ||
+ llvm::sys::path::is_absolute(DebugCompilationDir))) {
+    // Make the path absolute in the debug info, as MSVC does.
+ llvm::sys::fs::make_absolute(ObjFileNameForDebug);
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-object-file-name=") + ObjFileNameForDebug));
}
/// Add a CC1 and CC1AS option to specify the debug file path prefix map.
@@ -976,11 +1002,7 @@ static bool ContainsCompileAction(const Action *A) {
if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A))
return true;
- for (const auto &AI : A->inputs())
- if (ContainsCompileAction(AI))
- return true;
-
- return false;
+ return llvm::any_of(A->inputs(), ContainsCompileAction);
}
/// Check if -relax-all should be passed to the internal assembler.
@@ -1806,19 +1828,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
RenderAArch64ABI(Triple, Args, CmdArgs);
- if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
- options::OPT_mno_fix_cortex_a53_835769)) {
- CmdArgs.push_back("-mllvm");
- if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
- CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
- else
- CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0");
- } else if (Triple.isAndroid()) {
- // Enabled A53 errata (835769) workaround by default on android
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
- }
-
// Forward the -mglobal-merge option for explicit control over the pass.
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
options::OPT_mno_global_merge)) {
@@ -5666,7 +5675,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-autolink");
// Add in -fdebug-compilation-dir if necessary.
- addDebugCompDirArg(Args, CmdArgs, D.getVFS());
+ const char *DebugCompilationDir =
+ addDebugCompDirArg(Args, CmdArgs, D.getVFS());
addDebugPrefixMapArg(D, Args, CmdArgs);
@@ -5904,7 +5914,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// runtime.
if (Args.hasFlag(options::OPT_fopenmp_target_new_runtime,
options::OPT_fno_openmp_target_new_runtime,
- /*Default=*/!getToolChain().getTriple().isAMDGCN()))
+ /*Default=*/true))
CmdArgs.push_back("-fopenmp-target-new-runtime");
// When in OpenMP offloading mode, enable debugging on the device.
@@ -6997,18 +7007,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_moutline_atomics,
options::OPT_mno_outline_atomics)) {
- if (A->getOption().matches(options::OPT_moutline_atomics)) {
- // Option -moutline-atomics supported for AArch64 target only.
- if (!Triple.isAArch64()) {
- D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
- << Triple.getArchName();
- } else {
+    // The -moutline-atomics option is supported for the AArch64 target only.
+ if (!Triple.isAArch64()) {
+ D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
+ << Triple.getArchName() << A->getOption().getName();
+ } else {
+ if (A->getOption().matches(options::OPT_moutline_atomics)) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+outline-atomics");
+ } else {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-outline-atomics");
}
- } else {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("-outline-atomics");
}
} else if (Triple.isAArch64() &&
getToolChain().IsAArch64OutlineAtomicsDefault(Args)) {
@@ -7038,6 +7048,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Str));
}
+  // Add the output path of the object file for CodeView debug info.
+ if (EmitCodeView && Output.isFilename())
+ addDebugObjectName(Args, CmdArgs, DebugCompilationDir,
+ Output.getFilename());
+
// Add the "-o out -x type src.c" flags last. This is done primarily to make
// the -cc1 command easier to edit when reproducing compiler crashes.
if (Output.getType() == types::TY_Dependencies) {
@@ -7111,11 +7126,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_emit_llvm);
}
-Clang::Clang(const ToolChain &TC)
+Clang::Clang(const ToolChain &TC, bool HasIntegratedBackend)
// CAUTION! The first constructor argument ("clang") is not arbitrary,
// as it is for other tools. Some operations on a Tool actually test
// whether that tool is Clang based on the Tool's Name as a string.
- : Tool("clang", "clang frontend", TC) {}
+ : Tool("clang", "clang frontend", TC), HasBackend(HasIntegratedBackend) {}
Clang::~Clang() {}
@@ -7655,11 +7670,14 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_I_Group);
// Determine the original source input.
- const Action *SourceAction = &JA;
- while (SourceAction->getKind() != Action::InputClass) {
- assert(!SourceAction->getInputs().empty() && "unexpected root action!");
- SourceAction = SourceAction->getInputs()[0];
- }
+ auto FindSource = [](const Action *S) -> const Action * {
+ while (S->getKind() != Action::InputClass) {
+ assert(!S->getInputs().empty() && "unexpected root action!");
+ S = S->getInputs()[0];
+ }
+ return S;
+ };
+ const Action *SourceAction = FindSource(&JA);
// Forward -g and handle debug info related flags, assuming we are dealing
// with an actual assembly file.
@@ -7678,6 +7696,10 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
codegenoptions::DebugInfoKind DebugInfoKind = codegenoptions::NoDebugInfo;
+ // Add the -fdebug-compilation-dir flag if needed.
+ const char *DebugCompilationDir =
+ addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
+
if (SourceAction->getType() == types::TY_Asm ||
SourceAction->getType() == types::TY_PP_Asm) {
// You might think that it would be ok to set DebugInfoKind outside of
@@ -7686,8 +7708,6 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// and it's not clear whether that test is just overly restrictive.
DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
: codegenoptions::NoDebugInfo);
- // Add the -fdebug-compilation-dir flag if needed.
- addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
addDebugPrefixMapArg(getToolChain().getDriver(), Args, CmdArgs);
@@ -7798,6 +7818,29 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_mllvm);
+ if (DebugInfoKind > codegenoptions::NoDebugInfo && Output.isFilename())
+ addDebugObjectName(Args, CmdArgs, DebugCompilationDir,
+ Output.getFilename());
+
+  // Fix up any previous commands that use -object-file-name, because when we
+  // generated them the final .obj name wasn't yet known.
+ for (Command &J : C.getJobs()) {
+ if (SourceAction != FindSource(&J.getSource()))
+ continue;
+ auto &JArgs = J.getArguments();
+ for (unsigned I = 0; I < JArgs.size(); ++I) {
+ if (StringRef(JArgs[I]).startswith("-object-file-name=") &&
+ Output.isFilename()) {
+ ArgStringList NewArgs(JArgs.begin(), JArgs.begin() + I);
+ addDebugObjectName(Args, NewArgs, DebugCompilationDir,
+ Output.getFilename());
+ NewArgs.append(JArgs.begin() + I + 1, JArgs.end());
+ J.replaceArguments(NewArgs);
+ break;
+ }
+ }
+ }
+
assert(Output.isFilename() && "Unexpected lipo output.");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
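
[Editor's note] The fix-up loop above rewrites earlier jobs whose -object-file-name= value was generated before the final output name was known. A simplified standalone sketch of the replacement step, with std::string in place of the driver's arg strings:

#include <string>
#include <vector>

static void replaceObjectFileName(std::vector<std::string> &Args,
                                  const std::string &FinalName) {
  for (std::string &A : Args)
    if (A.rfind("-object-file-name=", 0) == 0) { // startswith
      A = "-object-file-name=" + FinalName;
      break; // at most one such flag per job
    }
}
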
@@ -7878,7 +7921,7 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
Triples += '-';
Triples += CurTC->getTriple().normalize();
if ((CurKind == Action::OFK_HIP || CurKind == Action::OFK_Cuda) &&
- CurDep->getOffloadingArch()) {
+ !StringRef(CurDep->getOffloadingArch()).empty()) {
Triples += '-';
Triples += CurDep->getOffloadingArch();
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index d4b4988b4a8c..00e0490e069b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -26,6 +26,10 @@ namespace tools {
/// Clang compiler tool.
class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
+  // Indicates whether this instance has an integrated backend using the
+  // internal LLVM infrastructure.
+ bool HasBackend;
+
public:
static const char *getBaseInputName(const llvm::opt::ArgList &Args,
const InputInfo &Input);
@@ -99,11 +103,12 @@ private:
const InputInfo &Input, const llvm::opt::ArgList &Args) const;
public:
- Clang(const ToolChain &TC);
+ Clang(const ToolChain &TC, bool HasIntegratedBackend = true);
~Clang() override;
bool hasGoodDiagnostics() const override { return true; }
bool hasIntegratedAssembler() const override { return true; }
+ bool hasIntegratedBackend() const override { return HasBackend; }
bool hasIntegratedCPP() const override { return true; }
bool canEmitIR() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 630baf9d6ae6..407f81a2ae09 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -15,7 +15,7 @@
#include "Arch/SystemZ.h"
#include "Arch/VE.h"
#include "Arch/X86.h"
-#include "HIP.h"
+#include "HIPAMD.h"
#include "Hexagon.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
index 06d3edc70e45..f7da3f187814 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -209,8 +209,7 @@ static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfoList &Inputs,
- unsigned Version[5], bool LinkerIsLLD,
- bool LinkerIsLLDDarwinNew) const {
+ unsigned Version[5], bool LinkerIsLLD) const {
const Driver &D = getToolChain().getDriver();
const toolchains::MachO &MachOTC = getMachOToolChain();
@@ -343,7 +342,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_init);
// Add the deployment target.
- if (Version[0] >= 520 || LinkerIsLLDDarwinNew)
+ if (Version[0] >= 520 || LinkerIsLLD)
MachOTC.addPlatformVersionArgs(Args, CmdArgs);
else
MachOTC.addMinVersionArgs(Args, CmdArgs);
@@ -556,14 +555,13 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args);
}
- bool LinkerIsLLD, LinkerIsLLDDarwinNew;
- const char *Exec = Args.MakeArgString(
- getToolChain().GetLinkerPath(&LinkerIsLLD, &LinkerIsLLDDarwinNew));
+ bool LinkerIsLLD;
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetLinkerPath(&LinkerIsLLD));
// I'm not sure why this particular decomposition exists in gcc, but
  // we follow suit for ease of comparison.
- AddLinkArgs(C, Args, CmdArgs, Inputs, Version, LinkerIsLLD,
- LinkerIsLLDDarwinNew);
+ AddLinkArgs(C, Args, CmdArgs, Inputs, Version, LinkerIsLLD);
if (willEmitRemarks(Args) &&
checkRemarksOptions(getToolChain().getDriver(), Args,
@@ -715,7 +713,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
ResponseFileSupport ResponseSupport;
- if (Version[0] >= 705 || LinkerIsLLDDarwinNew) {
+ if (Version[0] >= 705 || LinkerIsLLD) {
ResponseSupport = ResponseFileSupport::AtFileUTF8();
} else {
// For older versions of the linker, use the legacy filelist method instead.
@@ -1412,8 +1410,8 @@ static std::string getSystemOrSDKMacOSVersion(StringRef MacOSSDKVersion) {
llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
if (!SystemTriple.isMacOSX())
return std::string(MacOSSDKVersion);
- SystemTriple.getMacOSXVersion(Major, Minor, Micro);
- VersionTuple SystemVersion(Major, Minor, Micro);
+ VersionTuple SystemVersion;
+ SystemTriple.getMacOSXVersion(SystemVersion);
bool HadExtra;
if (!Driver::GetReleaseVersion(MacOSSDKVersion, Major, Minor, Micro,
HadExtra))
@@ -1554,12 +1552,10 @@ struct DarwinPlatform {
const Optional<DarwinSDKInfo> &SDKInfo) {
DarwinPlatform Result(TargetArg, getPlatformFromOS(TT.getOS()), OSVersion,
A);
- unsigned Major, Minor, Micro;
- TT.getOSVersion(Major, Minor, Micro);
- if (Major == 0)
+ VersionTuple OsVersion = TT.getOSVersion();
+ if (OsVersion.getMajor() == 0)
Result.HasOSVersion = false;
- Result.setEnvironment(TT.getEnvironment(),
- VersionTuple(Major, Minor, Micro), SDKInfo);
+ Result.setEnvironment(TT.getEnvironment(), OsVersion, SDKInfo);
return Result;
}
static DarwinPlatform
@@ -1805,7 +1801,7 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
const Driver &TheDriver) {
- unsigned Major, Minor, Micro;
+ VersionTuple OsVersion;
llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
switch (OS) {
case llvm::Triple::Darwin:
@@ -1814,24 +1810,22 @@ std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
// macos, use the host triple to infer OS version.
if (Triple.isMacOSX() && SystemTriple.isMacOSX() &&
!Triple.getOSMajorVersion())
- SystemTriple.getMacOSXVersion(Major, Minor, Micro);
- else if (!Triple.getMacOSXVersion(Major, Minor, Micro))
+ SystemTriple.getMacOSXVersion(OsVersion);
+ else if (!Triple.getMacOSXVersion(OsVersion))
TheDriver.Diag(diag::err_drv_invalid_darwin_version)
<< Triple.getOSName();
break;
case llvm::Triple::IOS:
if (Triple.isMacCatalystEnvironment() && !Triple.getOSMajorVersion()) {
- Major = 13;
- Minor = 1;
- Micro = 0;
+ OsVersion = VersionTuple(13, 1);
} else
- Triple.getiOSVersion(Major, Minor, Micro);
+ OsVersion = Triple.getiOSVersion();
break;
case llvm::Triple::TvOS:
- Triple.getOSVersion(Major, Minor, Micro);
+ OsVersion = Triple.getOSVersion();
break;
case llvm::Triple::WatchOS:
- Triple.getWatchOSVersion(Major, Minor, Micro);
+ OsVersion = Triple.getWatchOSVersion();
break;
default:
llvm_unreachable("Unexpected OS type");
@@ -1839,7 +1833,9 @@ std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
}
std::string OSVersion;
- llvm::raw_string_ostream(OSVersion) << Major << '.' << Minor << '.' << Micro;
+ llvm::raw_string_ostream(OSVersion)
+ << OsVersion.getMajor() << '.' << OsVersion.getMinor().getValueOr(0)
+ << '.' << OsVersion.getSubminor().getValueOr(0);
return OSVersion;
}
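
[Editor's note] A small sketch of the printing idiom above, with std::optional standing in for llvm::Optional: absent minor/subminor components print as 0.

#include <optional>
#include <sstream>
#include <string>

struct Version {
  unsigned Major = 0;
  std::optional<unsigned> Minor, Subminor;
};

static std::string toString(const Version &V) {
  std::ostringstream OS;
  OS << V.Major << '.' << V.Minor.value_or(0) << '.'
     << V.Subminor.value_or(0);
  return OS.str();
}
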
@@ -1909,15 +1905,13 @@ getDeploymentTargetFromMTargetOSArg(DerivedArgList &Args,
return None;
}
- unsigned Major, Minor, Micro;
- TT.getOSVersion(Major, Minor, Micro);
- if (!Major) {
+ VersionTuple Version = TT.getOSVersion();
+ if (!Version.getMajor()) {
TheDriver.Diag(diag::err_drv_invalid_version_number)
<< A->getAsString(Args);
return None;
}
- return DarwinPlatform::createFromMTargetOS(TT.getOS(),
- VersionTuple(Major, Minor, Micro),
+ return DarwinPlatform::createFromMTargetOS(TT.getOS(), Version,
TT.getEnvironment(), A, SDKInfo);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
index a307cd317ac3..5e23047a5512 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
@@ -64,7 +64,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
const InputInfoList &Inputs, unsigned Version[5],
- bool LinkerIsLLD, bool LinkerIsLLDDarwinNew) const;
+ bool LinkerIsLLD) const;
public:
Linker(const ToolChain &TC) : MachOTool("darwin::Linker", "linker", TC) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index d08ea282f6df..de635f5816cf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -293,8 +293,8 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- bool Profiling = Args.hasArg(options::OPT_pg) &&
- ToolChain.getTriple().getOSMajorVersion() < 14;
+ unsigned Major = ToolChain.getTriple().getOSMajorVersion();
+ bool Profiling = Args.hasArg(options::OPT_pg) && Major != 0 && Major < 14;
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
@@ -419,8 +419,8 @@ void FreeBSD::addLibStdCxxIncludePaths(
void FreeBSD::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
- bool Profiling =
- Args.hasArg(options::OPT_pg) && getTriple().getOSMajorVersion() < 14;
+ unsigned Major = getTriple().getOSMajorVersion();
+ bool Profiling = Args.hasArg(options::OPT_pg) && Major != 0 && Major < 14;
switch (Type) {
case ToolChain::CST_Libcxx:
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
index 07af1a0457c7..6d553791b394 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
@@ -1,4 +1,4 @@
-//===--- HIP.cpp - HIP Tool and ToolChain Implementations -------*- C++ -*-===//
+//===--- HIPAMD.cpp - HIP Tool and ToolChain Implementations ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,9 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#include "HIP.h"
+#include "HIPAMD.h"
#include "AMDGPU.h"
#include "CommonArgs.h"
+#include "HIPUtility.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetID.h"
#include "clang/Driver/Compilation.h"
@@ -34,10 +35,6 @@ using namespace llvm::opt;
#define NULL_FILE "/dev/null"
#endif
-namespace {
-const unsigned HIPCodeObjectAlign = 4096;
-} // namespace
-
static bool shouldSkipSanitizeOption(const ToolChain &TC,
const llvm::opt::ArgList &DriverArgs,
StringRef TargetID,
@@ -76,9 +73,9 @@ static bool shouldSkipSanitizeOption(const ToolChain &TC,
}
void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const InputInfo &Output,
- const llvm::opt::ArgList &Args) const {
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const {
// Construct lld command.
// The output from ld.lld is an HSA code object file.
ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined", "-shared",
@@ -129,142 +126,28 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
Lld, LldArgs, Inputs, Output));
}
-// Construct a clang-offload-bundler command to bundle code objects for
-// different GPU's into a HIP fat binary.
-void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
- StringRef OutputFileName, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, const Tool& T) {
- // Construct clang-offload-bundler command to bundle object files for
- // for different GPU archs.
- ArgStringList BundlerArgs;
- BundlerArgs.push_back(Args.MakeArgString("-type=o"));
- BundlerArgs.push_back(
- Args.MakeArgString("-bundle-align=" + Twine(HIPCodeObjectAlign)));
-
- // ToDo: Remove the dummy host binary entry which is required by
- // clang-offload-bundler.
- std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
- std::string BundlerInputArg = "-inputs=" NULL_FILE;
-
- // For code object version 2 and 3, the offload kind in bundle ID is 'hip'
- // for backward compatibility. For code object version 4 and greater, the
- // offload kind in bundle ID is 'hipv4'.
- std::string OffloadKind = "hip";
- if (getAMDGPUCodeObjectVersion(C.getDriver(), Args) >= 4)
- OffloadKind = OffloadKind + "v4";
- for (const auto &II : Inputs) {
- const auto* A = II.getAction();
- BundlerTargetArg = BundlerTargetArg + "," + OffloadKind +
- "-amdgcn-amd-amdhsa--" +
- StringRef(A->getOffloadingArch()).str();
- BundlerInputArg = BundlerInputArg + "," + II.getFilename();
- }
- BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
- BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
-
- std::string Output = std::string(OutputFileName);
- auto BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(Output));
- BundlerArgs.push_back(BundlerOutputArg);
-
- const char *Bundler = Args.MakeArgString(
- T.getToolChain().GetProgramPath("clang-offload-bundler"));
- C.addCommand(std::make_unique<Command>(
- JA, T, ResponseFileSupport::None(), Bundler, BundlerArgs, Inputs,
- InputInfo(&JA, Args.MakeArgString(Output))));
-}
-
-/// Add Generated HIP Object File which has device images embedded into the
-/// host to the argument list for linking. Using MC directives, embed the
-/// device code and also define symbols required by the code generation so that
-/// the image can be retrieved at runtime.
-void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
- Compilation &C, const InputInfo &Output,
- const InputInfoList &Inputs, const ArgList &Args,
- const JobAction &JA) const {
- const ToolChain &TC = getToolChain();
- std::string Name =
- std::string(llvm::sys::path::stem(Output.getFilename()));
-
- // Create Temp Object File Generator,
- // Offload Bundled file and Bundled Object file.
- // Keep them if save-temps is enabled.
- const char *McinFile;
- const char *BundleFile;
- if (C.getDriver().isSaveTempsEnabled()) {
- McinFile = C.getArgs().MakeArgString(Name + ".mcin");
- BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
- } else {
- auto TmpNameMcin = C.getDriver().GetTemporaryPath(Name, "mcin");
- McinFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameMcin));
- auto TmpNameFb = C.getDriver().GetTemporaryPath(Name, "hipfb");
- BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameFb));
- }
- constructHIPFatbinCommand(C, JA, BundleFile, Inputs, Args, *this);
-
- // Create a buffer to write the contents of the temp obj generator.
- std::string ObjBuffer;
- llvm::raw_string_ostream ObjStream(ObjBuffer);
-
- // Add MC directives to embed target binaries. We ensure that each
- // section and image is 16-byte aligned. This is not mandatory, but
- // increases the likelihood of data to be aligned with a cache block
- // in several main host machines.
- ObjStream << "# HIP Object Generator\n";
- ObjStream << "# *** Automatically generated by Clang ***\n";
- ObjStream << " .protected __hip_fatbin\n";
- ObjStream << " .type __hip_fatbin,@object\n";
- ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
- ObjStream << " .globl __hip_fatbin\n";
- ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
- << "\n";
- ObjStream << "__hip_fatbin:\n";
- ObjStream << " .incbin \"" << BundleFile << "\"\n";
- ObjStream.flush();
-
- // Dump the contents of the temp object file gen if the user requested that.
- // We support this option to enable testing of behavior with -###.
- if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
- llvm::errs() << ObjBuffer;
-
- // Open script file and write the contents.
- std::error_code EC;
- llvm::raw_fd_ostream Objf(McinFile, EC, llvm::sys::fs::OF_None);
-
- if (EC) {
- C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
- return;
- }
-
- Objf << ObjBuffer;
-
- ArgStringList McArgs{"-o", Output.getFilename(),
- McinFile, "--filetype=obj"};
- const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Mc, McArgs, Inputs, Output));
-}
-
// For amdgcn, the inputs of the linker job are device bitcode and the output
// is an object file. It runs the llvm-link, opt, llc, then lld steps.
void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
if (Inputs.size() > 0 &&
Inputs[0].getType() == types::TY_Image &&
JA.getType() == types::TY_Object)
- return constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs, Args, JA);
+ return HIP::constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs,
+ Args, JA, *this);
if (JA.getType() == types::TY_HIP_FATBIN)
- return constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs, Args, *this);
+ return HIP::constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs,
+ Args, *this);
return constructLldCommand(C, JA, Inputs, Output, Args);
}
-HIPToolChain::HIPToolChain(const Driver &D, const llvm::Triple &Triple,
- const ToolChain &HostTC, const ArgList &Args)
+HIPAMDToolChain::HIPAMDToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const ArgList &Args)
: ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
  // Look up binaries in the driver directory; this is used to
  // discover the clang-offload-bundler executable.
@@ -279,9 +162,8 @@ HIPToolChain::HIPToolChain(const Driver &D, const llvm::Triple &Triple,
}
}
-void HIPToolChain::addClangTargetOptions(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
+void HIPAMDToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
@@ -324,9 +206,9 @@ void HIPToolChain::addClangTargetOptions(
}
llvm::opt::DerivedArgList *
-HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
- StringRef BoundArch,
- Action::OffloadKind DeviceOffloadKind) const {
+HIPAMDToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
DerivedArgList *DAL =
HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
if (!DAL)
@@ -349,44 +231,44 @@ HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
return DAL;
}
-Tool *HIPToolChain::buildLinker() const {
+Tool *HIPAMDToolChain::buildLinker() const {
assert(getTriple().getArch() == llvm::Triple::amdgcn);
return new tools::AMDGCN::Linker(*this);
}
-void HIPToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+void HIPAMDToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
HostTC.addClangWarningOptions(CC1Args);
}
ToolChain::CXXStdlibType
-HIPToolChain::GetCXXStdlibType(const ArgList &Args) const {
+HIPAMDToolChain::GetCXXStdlibType(const ArgList &Args) const {
return HostTC.GetCXXStdlibType(Args);
}
-void HIPToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void HIPAMDToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
}
-void HIPToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
- ArgStringList &CC1Args) const {
+void HIPAMDToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &Args, ArgStringList &CC1Args) const {
HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
}
-void HIPToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
- ArgStringList &CC1Args) const {
+void HIPAMDToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
}
-void HIPToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+void HIPAMDToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
}
-SanitizerMask HIPToolChain::getSupportedSanitizers() const {
- // The HIPToolChain only supports sanitizers in the sense that it allows
+SanitizerMask HIPAMDToolChain::getSupportedSanitizers() const {
+ // The HIPAMDToolChain only supports sanitizers in the sense that it allows
// sanitizer arguments on the command line if they are supported by the host
- // toolchain. The HIPToolChain will actually ignore any command line
+ // toolchain. The HIPAMDToolChain will actually ignore any command line
// arguments for any of these "supported" sanitizers. That means that no
// sanitization of device code is actually supported at this time.
//
@@ -396,13 +278,13 @@ SanitizerMask HIPToolChain::getSupportedSanitizers() const {
return HostTC.getSupportedSanitizers();
}
-VersionTuple HIPToolChain::computeMSVCVersion(const Driver *D,
- const ArgList &Args) const {
+VersionTuple HIPAMDToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
return HostTC.computeMSVCVersion(D, Args);
}
llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
-HIPToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
+HIPAMDToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
llvm::SmallVector<BitCodeLibraryInfo, 12> BCLibs;
if (DriverArgs.hasArg(options::OPT_nogpulib))
return {};
@@ -476,11 +358,11 @@ HIPToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
return BCLibs;
}
-void HIPToolChain::checkTargetID(const llvm::opt::ArgList &DriverArgs) const {
+void HIPAMDToolChain::checkTargetID(
+ const llvm::opt::ArgList &DriverArgs) const {
auto PTID = getParsedTargetID(DriverArgs);
if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
getDriver().Diag(clang::diag::err_drv_bad_target_id)
<< PTID.OptionalTargetID.getValue();
}
- return;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
index 60b3d69b3f52..cc472a595db9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
@@ -1,4 +1,4 @@
-//===--- HIP.h - HIP ToolChain Implementations ------------------*- C++ -*-===//
+//===--- HIPAMD.h - HIP ToolChain Implementations ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
-#include "clang/Driver/ToolChain.h"
-#include "clang/Driver/Tool.h"
#include "AMDGPU.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
namespace clang {
namespace driver {
@@ -19,11 +19,6 @@ namespace driver {
namespace tools {
namespace AMDGCN {
- // Construct command for creating HIP fatbin.
- void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
- StringRef OutputFileName, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs, const Tool& T);
-
// Runs llvm-link/opt/llc/lld, which link multiple LLVM bitcode files together
// with the device library, then compile the result to ISA in a shared object.
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
@@ -38,17 +33,9 @@ public:
const char *LinkingOutput) const override;
private:
-
void constructLldCommand(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const InputInfo &Output,
const llvm::opt::ArgList &Args) const;
-
- // Construct command for creating Object from HIP fatbin.
- void constructGenerateObjFileFromHIPFatBinary(Compilation &C,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- const JobAction &JA) const;
};
} // end namespace AMDGCN
@@ -56,10 +43,10 @@ private:
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY HIPToolChain final : public ROCMToolChain {
+class LLVM_LIBRARY_VISIBILITY HIPAMDToolChain final : public ROCMToolChain {
public:
- HIPToolChain(const Driver &D, const llvm::Triple &Triple,
- const ToolChain &HostTC, const llvm::opt::ArgList &Args);
+ HIPAMDToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
const llvm::Triple *getAuxTriple() const override {
return &HostTC.getTriple();
@@ -68,9 +55,10 @@ public:
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
void
@@ -105,4 +93,4 @@ protected:
} // end namespace driver
} // end namespace clang
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
new file mode 100644
index 000000000000..d68c87e9b3e7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -0,0 +1,292 @@
+//===--- HIPSPV.cpp - HIPSPV ToolChain Implementation -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HIPSPV.h"
+#include "CommonArgs.h"
+#include "HIPUtility.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+// Convenience function for creating a temporary file for both modes of
+// isSaveTempsEnabled().
+static const char *getTempFile(Compilation &C, StringRef Prefix,
+ StringRef Extension) {
+ if (C.getDriver().isSaveTempsEnabled()) {
+ return C.getArgs().MakeArgString(Prefix + "." + Extension);
+ }
+ auto TmpFile = C.getDriver().GetTemporaryPath(Prefix, Extension);
+ return C.addTempFile(C.getArgs().MakeArgString(TmpFile));
+}
+
+// Locates the HIP pass plugin.
+static std::string findPassPlugin(const Driver &D,
+ const llvm::opt::ArgList &Args) {
+ StringRef Path = Args.getLastArgValue(options::OPT_hipspv_pass_plugin_EQ);
+ if (!Path.empty()) {
+ if (llvm::sys::fs::exists(Path))
+ return Path.str();
+ D.Diag(diag::err_drv_no_such_file) << Path;
+ }
+
+ StringRef hipPath = Args.getLastArgValue(options::OPT_hip_path_EQ);
+ if (!hipPath.empty()) {
+ SmallString<128> PluginPath(hipPath);
+ llvm::sys::path::append(PluginPath, "lib", "libLLVMHipSpvPasses.so");
+ if (llvm::sys::fs::exists(PluginPath))
+ return PluginPath.str().str();
+ PluginPath.assign(hipPath);
+ llvm::sys::path::append(PluginPath, "lib", "llvm",
+ "libLLVMHipSpvPasses.so");
+ if (llvm::sys::fs::exists(PluginPath))
+ return PluginPath.str().str();
+ }
+
+ return std::string();
+}
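
[Editor's note] A condensed sketch of the probing order above, using std::filesystem in place of llvm::sys::fs and omitting the driver diagnostics and fall-through: an explicit --hipspv-pass-plugin path wins, then <hip-path>/lib and <hip-path>/lib/llvm are tried for libLLVMHipSpvPasses.so.

#include <filesystem>
#include <optional>
#include <string>

static std::optional<std::string>
findHipPassPlugin(const std::string &ExplicitPath,
                  const std::string &HipPath) {
  namespace fs = std::filesystem;
  if (!ExplicitPath.empty())
    return fs::exists(ExplicitPath)
               ? std::optional<std::string>(ExplicitPath)
               : std::nullopt; // the real driver diagnoses this case
  for (const char *Sub : {"lib", "lib/llvm"}) {
    fs::path P = fs::path(HipPath) / Sub / "libLLVMHipSpvPasses.so";
    if (fs::exists(P))
      return P.string();
  }
  return std::nullopt;
}
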
+
+void HIPSPV::Linker::constructLinkAndEmitSpirvCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const InputInfo &Output, const llvm::opt::ArgList &Args) const {
+
+ assert(!Inputs.empty() && "Must have at least one input.");
+ std::string Name = std::string(llvm::sys::path::stem(Output.getFilename()));
+ const char *TempFile = getTempFile(C, Name + "-link", "bc");
+
+ // Link LLVM bitcode.
+ ArgStringList LinkArgs{};
+ for (auto Input : Inputs)
+ LinkArgs.push_back(Input.getFilename());
+ LinkArgs.append({"-o", TempFile});
+ const char *LlvmLink =
+ Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ LlvmLink, LinkArgs, Inputs, Output));
+
+ // Post-link HIP lowering.
+
+  // Run LLVM IR passes to lower/expand/emulate HIP code that does not
+  // translate to SPIR-V (e.g. dynamic shared memory).
+ auto PassPluginPath = findPassPlugin(C.getDriver(), Args);
+ if (!PassPluginPath.empty()) {
+ const char *PassPathCStr = C.getArgs().MakeArgString(PassPluginPath);
+ const char *OptOutput = getTempFile(C, Name + "-lower", "bc");
+ ArgStringList OptArgs{TempFile, "-load-pass-plugin",
+ PassPathCStr, "-passes=hip-post-link-passes",
+ "-o", OptOutput};
+ const char *Opt = Args.MakeArgString(getToolChain().GetProgramPath("opt"));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::None(), Opt, OptArgs, Inputs, Output));
+ TempFile = OptOutput;
+ }
+
+ // Emit SPIR-V binary.
+
+ llvm::opt::ArgStringList TrArgs{"--spirv-max-version=1.1",
+ "--spirv-ext=+all"};
+ InputInfo TrInput = InputInfo(types::TY_LLVM_BC, TempFile, "");
+ SPIRV::constructTranslateCommand(C, *this, JA, Output, TrInput, TrArgs);
+}
+
+void HIPSPV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ if (Inputs.size() > 0 && Inputs[0].getType() == types::TY_Image &&
+ JA.getType() == types::TY_Object)
+ return HIP::constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs,
+ Args, JA, *this);
+
+ if (JA.getType() == types::TY_HIP_FATBIN)
+ return HIP::constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs,
+ Args, *this);
+
+ constructLinkAndEmitSpirvCommand(C, JA, Inputs, Output, Args);
+}
+
+HIPSPVToolChain::HIPSPVToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const ArgList &Args)
+ : ToolChain(D, Triple, Args), HostTC(HostTC) {
+  // Look up binaries in the driver directory; this is used to
+  // discover the clang-offload-bundler executable.
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+void HIPSPVToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
+
+ assert(DeviceOffloadingKind == Action::OFK_HIP &&
+ "Only HIP offloading kinds are supported for GPUs.");
+
+ CC1Args.append(
+ {"-fcuda-is-device", "-fcuda-allow-variadic-functions",
+       // A crude workaround for llvm-spirv, which does not handle
+       // autovectorized code well (vector reductions, non-i{8,16,32,64} types).
+       // TODO: Allow autovectorization when the SPIR-V backend arrives.
+ "-mllvm", "-vectorize-loops=false", "-mllvm", "-vectorize-slp=false"});
+
+ if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
+ options::OPT_fno_cuda_approx_transcendentals, false))
+ CC1Args.push_back("-fcuda-approx-transcendentals");
+
+  // Default to "hidden" visibility, as object-level linking will not be
+  // supported for the foreseeable future.
+ if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat))
+ CC1Args.append(
+ {"-fvisibility", "hidden", "-fapply-global-visibility-to-externs"});
+
+ llvm::for_each(getHIPDeviceLibs(DriverArgs),
+ [&](const BitCodeLibraryInfo &BCFile) {
+ CC1Args.append({"-mlink-builtin-bitcode",
+ DriverArgs.MakeArgString(BCFile.Path)});
+ });
+}
+
+Tool *HIPSPVToolChain::buildLinker() const {
+ assert(getTriple().getArch() == llvm::Triple::spirv64);
+ return new tools::HIPSPV::Linker(*this);
+}
+
+void HIPSPVToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ HostTC.addClangWarningOptions(CC1Args);
+}
+
+ToolChain::CXXStdlibType
+HIPSPVToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ return HostTC.GetCXXStdlibType(Args);
+}
+
+void HIPSPVToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
+
+void HIPSPVToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &Args, ArgStringList &CC1Args) const {
+ HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
+}
+
+void HIPSPVToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
+}
+
+void HIPSPVToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nogpuinc))
+ return;
+
+ StringRef hipPath = DriverArgs.getLastArgValue(options::OPT_hip_path_EQ);
+ if (hipPath.empty()) {
+ getDriver().Diag(diag::err_drv_hipspv_no_hip_path) << 1 << "'-nogpuinc'";
+ return;
+ }
+ SmallString<128> P(hipPath);
+ llvm::sys::path::append(P, "include");
+ CC1Args.append({"-isystem", DriverArgs.MakeArgString(P)});
+}
+
+llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
+HIPSPVToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
+ llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> BCLibs;
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return {};
+
+ ArgStringList LibraryPaths;
+ // Find device libraries in --hip-device-lib-path and HIP_DEVICE_LIB_PATH.
+ auto HipDeviceLibPathArgs = DriverArgs.getAllArgValues(
+      // --hip-device-lib-path is an alias for this option.
+ clang::driver::options::OPT_rocm_device_lib_path_EQ);
+ for (auto Path : HipDeviceLibPathArgs)
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+
+ StringRef HipPath = DriverArgs.getLastArgValue(options::OPT_hip_path_EQ);
+ if (!HipPath.empty()) {
+ SmallString<128> Path(HipPath);
+ llvm::sys::path::append(Path, "lib", "hip-device-lib");
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+ }
+
+ addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
+
+  // Maintain compatibility with --hip-device-lib.
+ auto BCLibArgs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
+ if (!BCLibArgs.empty()) {
+ llvm::for_each(BCLibArgs, [&](StringRef BCName) {
+ StringRef FullName;
+ for (std::string LibraryPath : LibraryPaths) {
+ SmallString<128> Path(LibraryPath);
+ llvm::sys::path::append(Path, BCName);
+ FullName = Path;
+ if (llvm::sys::fs::exists(FullName)) {
+ BCLibs.emplace_back(FullName.str());
+ return;
+ }
+ }
+ getDriver().Diag(diag::err_drv_no_such_file) << BCName;
+ });
+ } else {
+ // Search for a device library named 'hipspv-<triple>.bc'.
+ auto TT = getTriple().normalize();
+ std::string BCName = "hipspv-" + TT + ".bc";
+ for (auto *LibPath : LibraryPaths) {
+ SmallString<128> Path(LibPath);
+ llvm::sys::path::append(Path, BCName);
+ if (llvm::sys::fs::exists(Path)) {
+ BCLibs.emplace_back(Path.str().str());
+ return BCLibs;
+ }
+ }
+ getDriver().Diag(diag::err_drv_no_hipspv_device_lib)
+ << 1 << ("'" + TT + "' target");
+ return {};
+ }
+
+ return BCLibs;
+}
+
+SanitizerMask HIPSPVToolChain::getSupportedSanitizers() const {
+ // The HIPSPVToolChain only supports sanitizers in the sense that it allows
+ // sanitizer arguments on the command line if they are supported by the host
+ // toolchain. The HIPSPVToolChain will actually ignore any command line
+ // arguments for any of these "supported" sanitizers. That means that no
+ // sanitization of device code is actually supported at this time.
+ //
+ // This behavior is necessary because the host and device toolchains
+ // invocations often share the command line, so the device toolchain must
+ // tolerate flags meant only for the host toolchain.
+ return HostTC.getSupportedSanitizers();
+}
+
+VersionTuple HIPSPVToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
+ return HostTC.computeMSVCVersion(D, Args);
+}
+
+void HIPSPVToolChain::adjustDebugInfoKind(
+ codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const {
+ // Debug info generation is disabled for the SPIRV-LLVM-Translator,
+ // which currently aborts in the presence of DW_OP_LLVM_convert.
+ // TODO: Enable debug info when the SPIR-V backend arrives.
+ DebugInfoKind = codegenoptions::NoDebugInfo;
+}
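To make the device-library lookup above concrete, here is a minimal sketch of the default search, assuming a bare spirv64 device triple and --hip-path=/opt/hip (both illustrative, not part of the patch):

    // Default library name: "hipspv-" + getTriple().normalize() + ".bc",
    // e.g. "hipspv-spirv64.bc", probed in each library path in order:
    //   /opt/hip/lib/hip-device-lib/hipspv-spirv64.bc
    //   <each --hip-device-lib-path / HIP_DEVICE_LIB_PATH dir>/hipspv-spirv64.bc
    // If no candidate exists, err_drv_no_hipspv_device_lib is emitted.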
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
new file mode 100644
index 000000000000..79520f77c742
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
@@ -0,0 +1,103 @@
+//===--- HIPSPV.h - HIP ToolChain Implementations ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPSPV_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPSPV_H
+
+#include "SPIRV.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace HIPSPV {
+
+// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode modules
+// together with the device library, then compiles the result to SPIR-V in a
+// shared object.
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("HIPSPV::Linker", "hipspv-link", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+private:
+ void constructLinkAndEmitSpirvCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const;
+};
+
+} // namespace HIPSPV
+} // namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY HIPSPVToolChain final : public ToolChain {
+public:
+ HIPSPVToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
+
+ const llvm::Triple *getAuxTriple() const override {
+ return &HostTC.getTriple();
+ }
+
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ llvm::SmallVector<BitCodeLibraryInfo, 12>
+ getHIPDeviceLibs(const llvm::opt::ArgList &Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ VersionTuple
+ computeMSVCVersion(const Driver *D,
+ const llvm::opt::ArgList &Args) const override;
+
+ void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const override;
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+ bool IsMathErrnoDefault() const override { return false; }
+ bool useIntegratedAs() const override { return true; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+
+ const ToolChain &HostTC;
+
+protected:
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPSPV_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
new file mode 100644
index 000000000000..1b04a20bacbf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -0,0 +1,167 @@
+//===--- HIPUtility.cpp - Common HIP Tool Chain Utilities -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HIPUtility.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace llvm::opt;
+
+#if defined(_WIN32) || defined(_WIN64)
+#define NULL_FILE "nul"
+#else
+#define NULL_FILE "/dev/null"
+#endif
+
+namespace {
+const unsigned HIPCodeObjectAlign = 4096;
+} // namespace
+
+// Constructs a triple string for the clang-offload-bundler.
+static std::string normalizeForBundler(const llvm::Triple &T,
+ bool HasTargetID) {
+ return HasTargetID ? (T.getArchName() + "-" + T.getVendorName() + "-" +
+ T.getOSName() + "-" + T.getEnvironmentName())
+ .str()
+ : T.normalize();
+}
+
+// Construct a clang-offload-bundler command to bundle code objects for
+// different devices into a HIP fat binary.
+void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ llvm::StringRef OutputFileName,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ const Tool &T) {
+ // Construct a clang-offload-bundler command to bundle object files for
+ // different GPU archs.
+ ArgStringList BundlerArgs;
+ BundlerArgs.push_back(Args.MakeArgString("-type=o"));
+ BundlerArgs.push_back(
+ Args.MakeArgString("-bundle-align=" + Twine(HIPCodeObjectAlign)));
+
+ // TODO: Remove the dummy host binary entry, which is required by
+ // clang-offload-bundler.
+ std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
+ std::string BundlerInputArg = "-inputs=" NULL_FILE;
+
+ // AMDGCN:
+ // For code object versions 2 and 3, the offload kind in the bundle ID is
+ // 'hip' for backward compatibility. For code object version 4 and greater,
+ // the offload kind in the bundle ID is 'hipv4'.
+ std::string OffloadKind = "hip";
+ auto &TT = T.getToolChain().getTriple();
+ if (TT.isAMDGCN() && getAMDGPUCodeObjectVersion(C.getDriver(), Args) >= 4)
+ OffloadKind = OffloadKind + "v4";
+ for (const auto &II : Inputs) {
+ const auto *A = II.getAction();
+ auto ArchStr = llvm::StringRef(A->getOffloadingArch());
+ BundlerTargetArg +=
+ "," + OffloadKind + "-" + normalizeForBundler(TT, !ArchStr.empty());
+ if (!ArchStr.empty())
+ BundlerTargetArg += "-" + ArchStr.str();
+ BundlerInputArg = BundlerInputArg + "," + II.getFilename();
+ }
+ BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+
+ std::string Output = std::string(OutputFileName);
+ auto *BundlerOutputArg =
+ Args.MakeArgString(std::string("-outputs=").append(Output));
+ BundlerArgs.push_back(BundlerOutputArg);
+
+ const char *Bundler = Args.MakeArgString(
+ T.getToolChain().GetProgramPath("clang-offload-bundler"));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::None(), Bundler, BundlerArgs, Inputs,
+ InputInfo(&JA, Args.MakeArgString(Output))));
+}
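For concreteness, the bundling command constructed above looks roughly like the following (file names and the gfx906 target ID are illustrative; the 'hipv4' kind appears only for AMDGCN with code object version 4 or greater):

    clang-offload-bundler -type=o -bundle-align=4096 \
      -targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx906 \
      -inputs=/dev/null,kernel-gfx906.o -outputs=app.hipfb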
+
+/// Add the generated HIP object file, which has the device images embedded,
+/// to the host link line. Using MC directives, embed the device code and
+/// also define the symbols required by the code generation so that the
+/// image can be retrieved at runtime.
+void HIP::constructGenerateObjFileFromHIPFatBinary(
+ Compilation &C, const InputInfo &Output, const InputInfoList &Inputs,
+ const ArgList &Args, const JobAction &JA, const Tool &T) {
+ const ToolChain &TC = T.getToolChain();
+ std::string Name = std::string(llvm::sys::path::stem(Output.getFilename()));
+
+ // Create the temp object file generator input, the offload bundle file,
+ // and the bundled object file.
+ // Keep them if -save-temps is enabled.
+ const char *McinFile;
+ const char *BundleFile;
+ if (C.getDriver().isSaveTempsEnabled()) {
+ McinFile = C.getArgs().MakeArgString(Name + ".mcin");
+ BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
+ } else {
+ auto TmpNameMcin = C.getDriver().GetTemporaryPath(Name, "mcin");
+ McinFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameMcin));
+ auto TmpNameFb = C.getDriver().GetTemporaryPath(Name, "hipfb");
+ BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameFb));
+ }
+ HIP::constructHIPFatbinCommand(C, JA, BundleFile, Inputs, Args, T);
+
+ // Create a buffer to write the contents of the temp obj generator.
+ std::string ObjBuffer;
+ llvm::raw_string_ostream ObjStream(ObjBuffer);
+
+ auto HostTriple =
+ C.getSingleOffloadToolChain<Action::OFK_Host>()->getTriple();
+
+ // Add MC directives to embed target binaries. We ensure that each
+ // section and image is aligned to HIPCodeObjectAlign bytes. This is not
+ // mandatory, but it increases the likelihood that the data is aligned
+ // with a cache block on common host machines.
+ ObjStream << "# HIP Object Generator\n";
+ ObjStream << "# *** Automatically generated by Clang ***\n";
+ if (HostTriple.isWindowsMSVCEnvironment()) {
+ ObjStream << " .section .hip_fatbin, \"dw\"\n";
+ } else {
+ ObjStream << " .protected __hip_fatbin\n";
+ ObjStream << " .type __hip_fatbin,@object\n";
+ ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
+ }
+ ObjStream << " .globl __hip_fatbin\n";
+ ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
+ << "\n";
+ ObjStream << "__hip_fatbin:\n";
+ ObjStream << " .incbin ";
+ llvm::sys::printArg(ObjStream, BundleFile, /*Quote=*/true);
+ ObjStream << "\n";
+ ObjStream.flush();
+
+ // Dump the contents of the temp object file generator if the user
+ // requested it. We support this option to enable testing with -###.
+ if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
+ llvm::errs() << ObjBuffer;
+
+ // Open script file and write the contents.
+ std::error_code EC;
+ llvm::raw_fd_ostream Objf(McinFile, EC, llvm::sys::fs::OF_None);
+
+ if (EC) {
+ C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return;
+ }
+
+ Objf << ObjBuffer;
+
+ ArgStringList McArgs{"-triple", Args.MakeArgString(HostTriple.normalize()),
+ "-o", Output.getFilename(),
+ McinFile, "--filetype=obj"};
+ const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
+ C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(), Mc,
+ McArgs, Inputs, Output));
+}
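Given the directives written above, the generated .mcin input for llvm-mc on a non-MSVC host looks roughly like the following (the bundle file name is illustrative; .p2align 12 is Log2 of HIPCodeObjectAlign = 4096):

    # HIP Object Generator
    # *** Automatically generated by Clang ***
        .protected __hip_fatbin
        .type __hip_fatbin,@object
        .section .hip_fatbin,"a",@progbits
        .globl __hip_fatbin
        .p2align 12
    __hip_fatbin:
        .incbin "app.hipfb"

llvm-mc then assembles this into a host object file that is passed to the linker.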
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h
new file mode 100644
index 000000000000..29e5a922024a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h
@@ -0,0 +1,35 @@
+//===--- HIPUtility.h - Common HIP Tool Chain Utilities ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPUTILITY_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPUTILITY_H
+
+#include "clang/Driver/Tool.h"
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace HIP {
+
+// Constructs the command for creating the HIP fatbin.
+void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ StringRef OutputFileName,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs, const Tool &T);
+
+// Constructs the command for creating an object file from the HIP fatbin.
+void constructGenerateObjFileFromHIPFatBinary(
+ Compilation &C, const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, const JobAction &JA, const Tool &T);
+
+} // namespace HIP
+} // namespace tools
+} // namespace driver
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPUTILITY_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
index 18270818d158..ba3040636604 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -26,8 +26,8 @@ using namespace clang;
using namespace llvm::opt;
// Default hvx-length for various versions.
-static StringRef getDefaultHvxLength(StringRef Cpu) {
- return llvm::StringSwitch<StringRef>(Cpu)
+static StringRef getDefaultHvxLength(StringRef HvxVer) {
+ return llvm::StringSwitch<StringRef>(HvxVer)
.Case("v60", "64b")
.Case("v62", "64b")
.Case("v65", "64b")
@@ -51,42 +51,107 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
// Handle HVX warnings.
handleHVXWarnings(D, Args);
- // Add the +hvx* features based on commandline flags.
- StringRef HVXFeature, HVXLength;
-
- // Handle -mhvx, -mhvx=, -mno-hvx.
- if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx,
- options::OPT_mhexagon_hvx,
- options::OPT_mhexagon_hvx_EQ)) {
- if (A->getOption().matches(options::OPT_mno_hexagon_hvx))
- return;
- if (A->getOption().matches(options::OPT_mhexagon_hvx_EQ)) {
- HasHVX = true;
- HVXFeature = Cpu = A->getValue();
- HVXFeature = Args.MakeArgString(llvm::Twine("+hvx") + HVXFeature.lower());
- } else if (A->getOption().matches(options::OPT_mhexagon_hvx)) {
- HasHVX = true;
- HVXFeature = Args.MakeArgString(llvm::Twine("+hvx") + Cpu);
+ auto makeFeature = [&Args](Twine T, bool Enable) -> StringRef {
+ const std::string &S = T.str();
+ StringRef Opt(S);
+ if (Opt.endswith("="))
+ Opt = Opt.drop_back(1);
+ if (Opt.startswith("mno-"))
+ Opt = Opt.drop_front(4);
+ else if (Opt.startswith("m"))
+ Opt = Opt.drop_front(1);
+ return Args.MakeArgString(Twine(Enable ? "+" : "-") + Twine(Opt));
+ };
+
+ auto withMinus = [](StringRef S) -> std::string {
+ return "-" + S.str();
+ };
+
+ // Drop the tiny-core suffix from the HVX version.
+ std::string HvxVer =
+ (Cpu.back() == 'T' || Cpu.back() == 't' ? Cpu.drop_back(1) : Cpu).str();
+ HasHVX = false;
+
+ // Handle -mhvx, -mhvx=, -mno-hvx. If both present, -mhvx= wins over -mhvx.
+ auto argOrNull = [&Args](auto FlagOn, auto FlagOff) -> Arg* {
+ if (Arg *A = Args.getLastArg(FlagOn, FlagOff)) {
+ if (A->getOption().matches(FlagOn))
+ return A;
}
- Features.push_back(HVXFeature);
+ return nullptr;
+ };
+
+ Arg *HvxBareA =
+ argOrNull(options::OPT_mhexagon_hvx, options::OPT_mno_hexagon_hvx);
+ Arg *HvxVerA =
+ argOrNull(options::OPT_mhexagon_hvx_EQ, options::OPT_mno_hexagon_hvx);
+
+ if (Arg *A = HvxVerA ? HvxVerA : HvxBareA) {
+ if (A->getOption().matches(options::OPT_mhexagon_hvx_EQ))
+ HvxVer = StringRef(A->getValue()).lower(); // lower() produces std::string
+ HasHVX = true;
+ Features.push_back(makeFeature(Twine("hvx") + HvxVer, true));
+ } else if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx)) {
+ // If there was an explicit -mno-hvx, add -hvx to target features.
+ Features.push_back(makeFeature(A->getOption().getName(), false));
}
+ StringRef HvxLen = getDefaultHvxLength(HvxVer);
+
// Handle -mhvx-length=.
if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
// These flags are valid only if HVX is enabled.
if (!HasHVX)
- D.Diag(diag::err_drv_invalid_hvx_length);
+ D.Diag(diag::err_drv_needs_hvx) << withMinus(A->getOption().getName());
else if (A->getOption().matches(options::OPT_mhexagon_hvx_length_EQ))
- HVXLength = A->getValue();
+ HvxLen = A->getValue();
+ }
+
+ if (HasHVX) {
+ StringRef L = makeFeature(Twine("hvx-length") + HvxLen.lower(), true);
+ Features.push_back(L);
}
- // Default hvx-length based on Cpu.
- else if (HasHVX)
- HVXLength = getDefaultHvxLength(Cpu);
-
- if (!HVXLength.empty()) {
- HVXFeature =
- Args.MakeArgString(llvm::Twine("+hvx-length") + HVXLength.lower());
- Features.push_back(HVXFeature);
+
+ unsigned HvxVerNum;
+ // getAsInteger returns 'true' on error.
+ if (StringRef(HvxVer).drop_front(1).getAsInteger(10, HvxVerNum))
+ HvxVerNum = 0;
+
+ // Handle HVX floating point flags.
+ auto checkFlagHvxVersion = [&](auto FlagOn, auto FlagOff,
+ unsigned MinVerNum) -> Optional<StringRef> {
+ // Return an Optional<StringRef>:
+ // - None indicates a verification failure, or that the flag was not
+ // present in Args.
+ // - Otherwise the returned value is the name of the feature to add
+ // to Features.
+ Arg *A = Args.getLastArg(FlagOn, FlagOff);
+ if (!A)
+ return None;
+
+ StringRef OptName = A->getOption().getName();
+ if (A->getOption().matches(FlagOff))
+ return makeFeature(OptName, false);
+
+ if (!HasHVX) {
+ D.Diag(diag::err_drv_needs_hvx) << withMinus(OptName);
+ return None;
+ }
+ if (HvxVerNum < MinVerNum) {
+ D.Diag(diag::err_drv_needs_hvx_version)
+ << withMinus(OptName) << ("v" + std::to_string(HvxVerNum));
+ return None;
+ }
+ return makeFeature(OptName, true);
+ };
+
+ if (auto F = checkFlagHvxVersion(options::OPT_mhexagon_hvx_qfloat,
+ options::OPT_mno_hexagon_hvx_qfloat, 68)) {
+ Features.push_back(*F);
+ }
+ if (auto F = checkFlagHvxVersion(options::OPT_mhexagon_hvx_ieee_fp,
+ options::OPT_mno_hexagon_hvx_ieee_fp, 68)) {
+ Features.push_back(*F);
}
}
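A sketch of the flag-to-feature mapping that makeFeature produces above (option spellings per the driver's -mhvx aliases; values illustrative):

    -mhvx=v68          ->  +hvxv68
    -mno-hvx           ->  -hvx
    -mhvx-length=128b  ->  +hvx-length128b  (default length derives from the HVX version)
    -mhvx-qfloat       ->  +hvx-qfloat      (diagnosed unless HVX is enabled and >= v68)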
@@ -117,7 +182,7 @@ void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
handleHVXTargetFeatures(D, Args, Features, Cpu, HasHVX);
if (HexagonToolChain::isAutoHVXEnabled(Args) && !HasHVX)
- D.Diag(diag::warn_drv_vectorize_needs_hvx);
+ D.Diag(diag::warn_drv_needs_hvx) << "auto-vectorization";
}
// Hexagon tools start.
@@ -156,6 +221,12 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fsyntax-only");
}
+ if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_ieee_fp,
+ options::OPT_mno_hexagon_hvx_ieee_fp)) {
+ if (A->getOption().matches(options::OPT_mhexagon_hvx_ieee_fp))
+ CmdArgs.push_back("-mhvx-ieee-fp");
+ }
+
if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
CmdArgs.push_back(Args.MakeArgString("-gpsize=" + Twine(G.getValue())));
}
@@ -226,6 +297,7 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
StringRef CpuVer = toolchains::HexagonToolChain::GetTargetCPUVersion(Args);
bool NeedsSanitizerDeps = addSanitizerRuntimes(HTC, Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(HTC, Args, CmdArgs);
//----------------------------------------------------------------------------
// Silence warnings for various options
@@ -297,6 +369,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lunwind");
}
+ if (NeedsXRayDeps)
+ linkXRayRuntimeDeps(HTC, CmdArgs);
CmdArgs.push_back("-lclang_rt.builtins-hexagon");
CmdArgs.push_back("-lc");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index 198774506e5e..e413640abad3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -277,14 +277,11 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// Android sysroots contain a library directory for each supported OS
// version as well as some unversioned libraries in the usual multiarch
// directory.
- unsigned Major;
- unsigned Minor;
- unsigned Micro;
- Triple.getEnvironmentVersion(Major, Minor, Micro);
- addPathIfExists(D,
- SysRoot + "/usr/lib/" + MultiarchTriple + "/" +
- llvm::to_string(Major),
- Paths);
+ addPathIfExists(
+ D,
+ SysRoot + "/usr/lib/" + MultiarchTriple + "/" +
+ llvm::to_string(Triple.getEnvironmentVersion().getMajor()),
+ Paths);
}
addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
@@ -666,8 +663,8 @@ void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
}
bool Linux::isPIEDefault(const llvm::opt::ArgList &Args) const {
- return getTriple().isAndroid() || getTriple().isMusl() ||
- getSanitizerArgs(Args).requiresPIE();
+ return CLANG_DEFAULT_PIE_ON_LINUX || getTriple().isAndroid() ||
+ getTriple().isMusl() || getSanitizerArgs(Args).requiresPIE();
}
bool Linux::IsAArch64OutlineAtomicsDefault(const ArgList &Args) const {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index 792b0a51fea0..66e9d8ab525a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -1194,14 +1194,6 @@ bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
return true;
}
-static VersionTuple getMSVCVersionFromTriple(const llvm::Triple &Triple) {
- unsigned Major, Minor, Micro;
- Triple.getEnvironmentVersion(Major, Minor, Micro);
- if (Major || Minor || Micro)
- return VersionTuple(Major, Minor, Micro);
- return VersionTuple();
-}
-
static VersionTuple getMSVCVersionFromExe(const std::string &BinDir) {
VersionTuple Version;
#ifdef _WIN32
@@ -1374,7 +1366,7 @@ VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
bool IsWindowsMSVC = getTriple().isWindowsMSVCEnvironment();
VersionTuple MSVT = ToolChain::computeMSVCVersion(D, Args);
if (MSVT.empty())
- MSVT = getMSVCVersionFromTriple(getTriple());
+ MSVT = getTriple().getEnvironmentVersion();
if (MSVT.empty() && IsWindowsMSVC)
MSVT = getMSVCVersionFromExe(getSubDirectoryPath(SubDirectoryType::Bin));
if (MSVT.empty() &&
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
index 7571398b7cc6..37b1fc5215ff 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -270,10 +270,9 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.getCompilerRTPath()));
}
- unsigned Major, Minor, Micro;
- Triple.getOSVersion(Major, Minor, Micro);
+ VersionTuple OsVersion = Triple.getOSVersion();
bool useLibgcc = true;
- if (Major >= 7 || Major == 0) {
+ if (OsVersion >= VersionTuple(7) || OsVersion.getMajor() == 0) {
switch (ToolChain.getArch()) {
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
@@ -409,9 +408,8 @@ Tool *NetBSD::buildAssembler() const {
Tool *NetBSD::buildLinker() const { return new tools::netbsd::Linker(*this); }
ToolChain::CXXStdlibType NetBSD::GetDefaultCXXStdlibType() const {
- unsigned Major, Minor, Micro;
- getTriple().getOSVersion(Major, Minor, Micro);
- if (Major >= 7 || Major == 0) {
+ VersionTuple OsVersion = getTriple().getOSVersion();
+ if (OsVersion >= VersionTuple(7) || OsVersion.getMajor() == 0) {
switch (getArch()) {
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
@@ -505,14 +503,13 @@ void NetBSD::addClangTargetOptions(const ArgList &DriverArgs,
if (SanArgs.hasAnySanitizer())
CC1Args.push_back("-D_REENTRANT");
- unsigned Major, Minor, Micro;
- getTriple().getOSVersion(Major, Minor, Micro);
+ VersionTuple OsVersion = getTriple().getOSVersion();
bool UseInitArrayDefault =
- Major >= 9 || Major == 0 ||
- getTriple().getArch() == llvm::Triple::aarch64 ||
- getTriple().getArch() == llvm::Triple::aarch64_be ||
- getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::armeb;
+ OsVersion >= VersionTuple(9) || OsVersion.getMajor() == 0 ||
+ getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::aarch64_be ||
+ getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::armeb;
if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, UseInitArrayDefault))
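A brief note on the VersionTuple comparisons introduced for NetBSD above (values illustrative): VersionTuple orders numerically by component, so the rewrite preserves the old 'Major >= 7' test, while getMajor() == 0 still covers triples that carry no OS version:

    VersionTuple(6, 99) >= VersionTuple(7)  // false
    VersionTuple(7)     >= VersionTuple(7)  // true
    VersionTuple(9, 1)  >= VersionTuple(7)  // true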
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
index 16e72d3c733f..50d03e79bbb0 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
@@ -13,6 +13,7 @@
#include "clang/Driver/Options.h"
using namespace clang::driver;
+using namespace clang::driver::toolchains;
using namespace clang::driver::tools;
using namespace llvm::opt;
@@ -27,7 +28,7 @@ void SPIRV::constructTranslateCommand(Compilation &C, const Tool &T,
if (Input.getType() == types::TY_PP_Asm)
CmdArgs.push_back("-to-binary");
if (Output.getType() == types::TY_PP_Asm)
- CmdArgs.push_back("-spirv-text");
+ CmdArgs.push_back("--spirv-tools-dis");
CmdArgs.append({"-o", Output.getFilename()});
@@ -47,3 +48,25 @@ void SPIRV::Translator::ConstructJob(Compilation &C, const JobAction &JA,
llvm_unreachable("Invalid number of input files.");
constructTranslateCommand(C, *this, JA, Output, Inputs[0], {});
}
+
+clang::driver::Tool *SPIRVToolChain::getTranslator() const {
+ if (!Translator)
+ Translator = std::make_unique<SPIRV::Translator>(*this);
+ return Translator.get();
+}
+
+clang::driver::Tool *SPIRVToolChain::SelectTool(const JobAction &JA) const {
+ Action::ActionClass AC = JA.getKind();
+ return SPIRVToolChain::getTool(AC);
+}
+
+clang::driver::Tool *SPIRVToolChain::getTool(Action::ActionClass AC) const {
+ switch (AC) {
+ default:
+ break;
+ case Action::BackendJobClass:
+ case Action::AssembleJobClass:
+ return SPIRVToolChain::getTranslator();
+ }
+ return ToolChain::getTool(AC);
+}
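In effect, for SPIR-V targets the backend and assemble jobs are both handed to the llvm-spirv translator rather than the integrated backend. An illustrative invocation (assuming llvm-spirv is on PATH):

    clang --target=spirv64 -c kernel.c   # cc1 emits bitcode; llvm-spirv emits kernel.o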
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
index 35d0446bd8b8..229f7018e3b5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
@@ -41,6 +41,39 @@ public:
} // namespace SPIRV
} // namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY SPIRVToolChain final : public ToolChain {
+ mutable std::unique_ptr<Tool> Translator;
+
+public:
+ SPIRVToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : ToolChain(D, Triple, Args) {}
+
+ bool useIntegratedAs() const override { return true; }
+ bool useIntegratedBackend() const override { return false; }
+
+ bool IsMathErrnoDefault() const override { return false; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+
+ clang::driver::Tool *SelectTool(const JobAction &JA) const override;
+
+protected:
+ clang::driver::Tool *getTool(Action::ActionClass AC) const override;
+
+private:
+ clang::driver::Tool *getTranslator() const;
+};
+
+} // namespace toolchains
} // namespace driver
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
index 1fcc52684baa..4cdeec7f9d8a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -28,17 +28,27 @@ VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
getProgramPaths().push_back("/opt/nec/ve/bin");
// ProgramPaths are found via 'PATH' environment variable.
- // default file paths are:
- // ${RESOURCEDIR}/lib/linux/ve (== getArchSpecificLibPath)
- // /lib/../lib64
- // /usr/lib/../lib64
- // ${BINPATH}/../lib
- // /lib
- // /usr/lib
- //
- // These are OK for host, but no go for VE. So, defines them all
- // from scratch here.
+ // The default library paths are:
+ // ${RESOURCEDIR}/lib/ve-unknown-linux-gnu,
+ // These are OK.
+
+ // The default file paths are:
+ // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPath)
+ // /lib/../lib64,
+ // /usr/lib/../lib64,
+ // ${BINPATH}/../lib,
+ // /lib,
+ // /usr/lib,
+ // These are OK for the host, but not for VE.
+
+ // So define the file paths from scratch here.
getFilePaths().clear();
+
+ // Add library directories:
+ // ${BINPATH}/../lib/ve-unknown-linux-gnu, (== getStdlibPath)
+ // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPath)
+ // ${SYSROOT}/opt/nec/ve/lib,
+ getFilePaths().push_back(getStdlibPath());
getFilePaths().push_back(getArchSpecificLibPath());
getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
}
@@ -115,9 +125,10 @@ void VEToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArrayRef<StringRef> DirVec(Dirs);
addSystemIncludes(DriverArgs, CC1Args, DirVec);
} else {
- SmallString<128> P(getDriver().ResourceDir);
- llvm::sys::path::append(P, "include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P);
+ // Add the following paths to support multi-target installations.
+ // ${INSTALLDIR}/include/ve-unknown-linux-gnu/c++/v1,
+ // ${INSTALLDIR}/include/c++/v1,
+ addLibCxxIncludePaths(DriverArgs, CC1Args);
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
index b44509ad3b88..63b575178bd1 100644
--- a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
@@ -40,6 +40,7 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
case llvm::Triple::x86_64:
case llvm::Triple::arm:
case llvm::Triple::aarch64:
+ case llvm::Triple::hexagon:
case llvm::Triple::ppc64le:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
index 968b35bfda23..5d03c9811e1b 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
@@ -91,7 +91,7 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
// In JavaScript, some @tags can be followed by {, and machinery that parses
// these comments will fail to understand the comment if followed by a line
// break. So avoid ever breaking before a {.
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
StringRef::size_type SpaceOffset =
Text.find_first_of(Blanks, MaxSplitBytes);
if (SpaceOffset != StringRef::npos && SpaceOffset + 1 < Text.size() &&
@@ -127,8 +127,7 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
}
// Avoid ever breaking before a @tag or a { in JavaScript.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- SpaceOffset + 1 < Text.size() &&
+ if (Style.isJavaScript() && SpaceOffset + 1 < Text.size() &&
(Text[SpaceOffset + 1] == '{' || Text[SpaceOffset + 1] == '@')) {
SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
continue;
@@ -460,8 +459,7 @@ BreakableBlockComment::BreakableBlockComment(
IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size());
// Detect a multiline jsdoc comment and set DelimitersOnNewline in that case.
- if (Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Java) {
+ if (Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) {
if ((Lines[0] == "*" || Lines[0].startswith("* ")) && Lines.size() > 1) {
// This is a multiline jsdoc comment.
DelimitersOnNewline = true;
@@ -580,8 +578,7 @@ const llvm::StringSet<>
};
unsigned BreakableBlockComment::getContentIndent(unsigned LineIndex) const {
- if (Style.Language != FormatStyle::LK_Java &&
- Style.Language != FormatStyle::LK_JavaScript)
+ if (Style.Language != FormatStyle::LK_Java && !Style.isJavaScript())
return 0;
// The content at LineIndex 0 of a comment like:
// /** line 0 */
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index 5073f5105d05..4225d6b67b0e 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -422,8 +422,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// ...
// }.bind(...));
// FIXME: We should find a more generic solution to this problem.
- !(State.Column <= NewLineColumn &&
- Style.Language == FormatStyle::LK_JavaScript) &&
+ !(State.Column <= NewLineColumn && Style.isJavaScript()) &&
!(Previous.closesScopeAfterBlock() && State.Column <= NewLineColumn))
return true;
@@ -493,14 +492,14 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
}
- // Break after the closing parenthesis of TypeScript decorators before
- // functions, getters and setters.
- static const llvm::StringSet<> BreakBeforeDecoratedTokens = {"get", "set",
- "function"};
- if (Style.Language == FormatStyle::LK_JavaScript &&
- BreakBeforeDecoratedTokens.contains(Current.TokenText) &&
- Previous.is(tok::r_paren) && Previous.is(TT_JavaAnnotation)) {
- return true;
+ if (Style.isJavaScript() && Previous.is(tok::r_paren) &&
+ Previous.is(TT_JavaAnnotation)) {
+ // Break after the closing parenthesis of TypeScript decorators before
+ // functions, getters and setters.
+ static const llvm::StringSet<> BreakBeforeDecoratedTokens = {"get", "set",
+ "function"};
+ if (BreakBeforeDecoratedTokens.contains(Current.TokenText))
+ return true;
}
// If the return type spans multiple lines, wrap before the function name.
@@ -510,7 +509,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
Style.AlwaysBreakAfterReturnType != FormatStyle::RTBS_None) &&
// Don't always break between a JavaScript `function` and the function
// name.
- Style.Language != FormatStyle::LK_JavaScript) ||
+ !Style.isJavaScript()) ||
(Current.is(tok::kw_operator) && !Previous.is(tok::coloncolon))) &&
!Previous.is(tok::kw_template) && State.Stack.back().BreakBeforeParameter)
return true;
@@ -827,9 +826,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// is common and should be formatted like a free-standing function. The same
// goes for wrapping before the lambda return type arrow.
if (!Current.is(TT_LambdaArrow) &&
- (Style.Language != FormatStyle::LK_JavaScript ||
- Current.NestingLevel != 0 || !PreviousNonComment ||
- !PreviousNonComment->is(tok::equal) ||
+ (!Style.isJavaScript() || Current.NestingLevel != 0 ||
+ !PreviousNonComment || !PreviousNonComment->is(tok::equal) ||
!Current.isOneOf(Keywords.kw_async, Keywords.kw_function)))
State.Stack.back().NestedBlockIndent = State.Column;
@@ -1337,6 +1335,9 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
bool Newline) {
const FormatToken &Current = *State.NextToken;
+ if (Current.FakeLParens.empty())
+ return;
+
const FormatToken *Previous = Current.getPreviousNonComment();
// Don't add extra indentation for the first fake parenthesis after
@@ -1348,10 +1349,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
(Previous->getPrecedence() == prec::Assignment &&
Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
Previous->is(TT_ObjCMethodExpr)));
- for (SmallVectorImpl<prec::Level>::const_reverse_iterator
- I = Current.FakeLParens.rbegin(),
- E = Current.FakeLParens.rend();
- I != E; ++I) {
+ for (const auto &PrecedenceLevel : llvm::reverse(Current.FakeLParens)) {
ParenState NewParenState = State.Stack.back();
NewParenState.Tok = nullptr;
NewParenState.ContainsLineBreak = false;
@@ -1363,7 +1361,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
NewParenState.NoLineBreak || State.Stack.back().NoLineBreakInOperand;
// Don't propagate AvoidBinPacking into subexpressions of arg/param lists.
- if (*I > prec::Comma)
+ if (PrecedenceLevel > prec::Comma)
NewParenState.AvoidBinPacking = false;
// Indent from 'LastSpace' unless these are fake parentheses encapsulating
@@ -1371,11 +1369,11 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// brackets is disabled.
if (!Current.isTrailingComment() &&
(Style.AlignOperands != FormatStyle::OAS_DontAlign ||
- *I < prec::Assignment) &&
+ PrecedenceLevel < prec::Assignment) &&
(!Previous || Previous->isNot(tok::kw_return) ||
- (Style.Language != FormatStyle::LK_Java && *I > 0)) &&
+ (Style.Language != FormatStyle::LK_Java && PrecedenceLevel > 0)) &&
(Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
- *I != prec::Comma || Current.NestingLevel == 0)) {
+ PrecedenceLevel != prec::Comma || Current.NestingLevel == 0)) {
NewParenState.Indent =
std::max(std::max(State.Column, NewParenState.Indent),
State.Stack.back().LastSpace);
@@ -1384,7 +1382,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
if (Previous &&
(Previous->getPrecedence() == prec::Assignment ||
Previous->is(tok::kw_return) ||
- (*I == prec::Conditional && Previous->is(tok::question) &&
+ (PrecedenceLevel == prec::Conditional && Previous->is(tok::question) &&
Previous->is(TT_ConditionalExpr))) &&
!Newline) {
// If BreakBeforeBinaryOperators is set, un-indent a bit to account for
@@ -1402,9 +1400,9 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// ParameterToInnerFunction));
// OuterFunction(SomeObject.InnerFunctionCall( // break
// ParameterToInnerFunction));
- if (*I > prec::Unknown)
+ if (PrecedenceLevel > prec::Unknown)
NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
- if (*I != prec::Conditional && !Current.is(TT_UnaryOperator) &&
+ if (PrecedenceLevel != prec::Conditional && !Current.is(TT_UnaryOperator) &&
Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
NewParenState.StartOfFunctionCall = State.Column;
@@ -1413,17 +1411,18 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// an assignment (i.e. *I <= prec::Assignment) as those have different
// indentation rules. Indent other expression, unless the indentation needs
// to be skipped.
- if (*I == prec::Conditional && Previous && Previous->is(tok::colon) &&
- Previous->is(TT_ConditionalExpr) && I == Current.FakeLParens.rbegin() &&
+ if (PrecedenceLevel == prec::Conditional && Previous &&
+ Previous->is(tok::colon) && Previous->is(TT_ConditionalExpr) &&
+ &PrecedenceLevel == &Current.FakeLParens.back() &&
!State.Stack.back().IsWrappedConditional) {
NewParenState.IsChainedConditional = true;
NewParenState.UnindentOperator = State.Stack.back().UnindentOperator;
- } else if (*I == prec::Conditional ||
- (!SkipFirstExtraIndent && *I > prec::Assignment &&
+ } else if (PrecedenceLevel == prec::Conditional ||
+ (!SkipFirstExtraIndent && PrecedenceLevel > prec::Assignment &&
!Current.isTrailingComment())) {
NewParenState.Indent += Style.ContinuationIndentWidth;
}
- if ((Previous && !Previous->opensScope()) || *I != prec::Comma)
+ if ((Previous && !Previous->opensScope()) || PrecedenceLevel != prec::Comma)
NewParenState.BreakBeforeParameter = false;
State.Stack.push_back(NewParenState);
SkipFirstExtraIndent = false;
@@ -1518,7 +1517,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
AvoidBinPacking =
(State.Stack.back().IsCSharpGenericTypeConstraint) ||
- (Style.Language == FormatStyle::LK_JavaScript && EndsInComma) ||
+ (Style.isJavaScript() && EndsInComma) ||
(State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
(Style.ExperimentalAutoDetectBinPacking &&
@@ -1547,7 +1546,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
}
}
- if (Style.Language == FormatStyle::LK_JavaScript && EndsInComma)
+ if (Style.isJavaScript() && EndsInComma)
BreakBeforeParameter = true;
}
// Generally inherit NoLineBreak from the current scope to nested scope.
@@ -1924,9 +1923,9 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
// FIXME: String literal breaking is currently disabled for C#, Java, Json
// and JavaScript, as it requires strings to be merged using "+" which we
// don't support.
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp() ||
- Style.isJson() || !Style.BreakStringLiterals || !AllowBreak)
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp() || Style.isJson() || !Style.BreakStringLiterals ||
+ !AllowBreak)
return nullptr;
// Don't break string literals inside preprocessor directives (except for
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index 17de1075aeaa..be01daa38929 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -2586,12 +2586,31 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
bool MainIncludeFound = false;
bool FormattingOff = false;
+ llvm::Regex RawStringRegex(
+ "R\"(([\\[A-Za-z0-9_{}#<>%:;.?*+/^&\\$|~!=,'\\-]|])*)\\(");
+ SmallVector<StringRef, 2> RawStringMatches;
+ std::string RawStringTermination = ")\"";
+
for (;;) {
auto Pos = Code.find('\n', SearchFrom);
StringRef Line =
Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
StringRef Trimmed = Line.trim();
+
+ // #includes inside raw string literals need to be ignored, or we will
+ // sort the contents of the string.
+ // Skip ahead until we think we are past the raw string literal's close.
+ if (RawStringRegex.match(Trimmed, &RawStringMatches)) {
+ std::string CharSequence = RawStringMatches[1].str();
+ RawStringTermination = ")" + CharSequence + "\"";
+ FormattingOff = true;
+ }
+
+ if (Trimmed.contains(RawStringTermination)) {
+ FormattingOff = false;
+ }
+
if (Trimmed == "// clang-format off" || Trimmed == "/* clang-format off */")
FormattingOff = true;
else if (Trimmed == "// clang-format on" ||
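The case the new regex guards against looks like this (a hypothetical snippet; the delimiter 'code' is arbitrary, making the termination sequence ')code"'):

    // The #includes inside the raw string must not be reordered by include
    // sorting; formatting stays off until the terminator is seen.
    const char *Generated = R"code(
    #include "zlib.h"
    #include "assert.h"
    )code";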
@@ -3031,8 +3050,7 @@ reformat(const FormatStyle &Style, StringRef Code,
});
}
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Style.JavaScriptQuotes != FormatStyle::JSQS_Leave)
+ if (Style.isJavaScript() && Style.JavaScriptQuotes != FormatStyle::JSQS_Leave)
Passes.emplace_back([&](const Environment &Env) {
return JavaScriptRequoter(Env, Expanded).process();
});
@@ -3041,7 +3059,7 @@ reformat(const FormatStyle &Style, StringRef Code,
return Formatter(Env, Expanded, Status).process();
});
- if (Style.Language == FormatStyle::LK_JavaScript &&
+ if (Style.isJavaScript() &&
Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped)
Passes.emplace_back([&](const Environment &Env) {
return TrailingCommaInserter(Env, Expanded).process();
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index 6768f041135c..57f8a5a45cbb 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -70,6 +70,10 @@ bool FormatToken::isSimpleTypeSpecifier() const {
}
}
+bool FormatToken::isTypeOrIdentifier() const {
+ return isSimpleTypeSpecifier() || Tok.isOneOf(tok::kw_auto, tok::identifier);
+}
+
TokenRole::~TokenRole() {}
void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index 1a2858018fde..d410ede32240 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -521,7 +521,9 @@ public:
}
/// Determine whether the token is a simple-type-specifier.
- bool isSimpleTypeSpecifier() const;
+ LLVM_NODISCARD bool isSimpleTypeSpecifier() const;
+
+ LLVM_NODISCARD bool isTypeOrIdentifier() const;
bool isObjCAccessSpecifier() const {
return is(tok::at) && Next &&
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index 64fbd2d5d45b..7736a7042f86 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -78,7 +78,7 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
assert(FirstInLineIndex == 0);
do {
Tokens.push_back(getNextToken());
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
tryParseJSRegexLiteral();
handleTemplateStrings();
}
@@ -107,7 +107,7 @@ void FormatTokenLexer::tryMergePreviousTokens() {
if (Style.isCpp() && tryTransformTryUsageForC())
return;
- if (Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
+ if (Style.isJavaScript() || Style.isCSharp()) {
static const tok::TokenKind NullishCoalescingOperator[] = {tok::question,
tok::question};
static const tok::TokenKind NullPropagatingOperator[] = {tok::question,
@@ -152,7 +152,7 @@ void FormatTokenLexer::tryMergePreviousTokens() {
if (tryMergeNSStringLiteral())
return;
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
tok::equal};
@@ -920,8 +920,7 @@ FormatToken *FormatTokenLexer::getNextToken() {
// finds comments that contain a backslash followed by a line break, truncates
// the comment token at the backslash, and resets the lexer to restart behind
// the backslash.
- if ((Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Java) &&
+ if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) &&
FormatTok->is(tok::comment) && FormatTok->TokenText.startswith("//")) {
size_t BackslashPos = FormatTok->TokenText.find('\\');
while (BackslashPos != StringRef::npos) {
@@ -982,7 +981,7 @@ FormatToken *FormatTokenLexer::getNextToken() {
tok::kw_operator)) {
FormatTok->Tok.setKind(tok::identifier);
FormatTok->Tok.setIdentifierInfo(nullptr);
- } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ } else if (Style.isJavaScript() &&
FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
tok::kw_operator)) {
FormatTok->Tok.setKind(tok::identifier);
@@ -1060,14 +1059,12 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
Tok.Tok.setKind(tok::string_literal);
Tok.IsUnterminatedLiteral = true;
- } else if (Style.Language == FormatStyle::LK_JavaScript &&
- Tok.TokenText == "''") {
+ } else if (Style.isJavaScript() && Tok.TokenText == "''") {
Tok.Tok.setKind(tok::string_literal);
}
}
- if ((Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Proto ||
+ if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) &&
Tok.is(tok::char_constant)) {
Tok.Tok.setKind(tok::string_literal);
diff --git a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index def551f863cd..38ab5b9df76d 100644
--- a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -180,9 +180,13 @@ getNamespaceToken(const AnnotatedLine *Line,
if (NamespaceTok->is(tok::l_brace)) {
// "namespace" keyword can be on the line preceding '{', e.g. in styles
// where BraceWrapping.AfterNamespace is true.
- if (StartLineIndex > 0)
+ if (StartLineIndex > 0) {
NamespaceTok = AnnotatedLines[StartLineIndex - 1]->First;
+ if (AnnotatedLines[StartLineIndex - 1]->endsWith(tok::semi))
+ return nullptr;
+ }
}
+
return NamespaceTok->getNamespaceToken();
}
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index a94d8cdc3b04..505a7250572b 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -242,7 +242,7 @@ private:
bool OperatorCalledAsMemberFunction =
Prev->Previous && Prev->Previous->isOneOf(tok::period, tok::arrow);
Contexts.back().IsExpression = OperatorCalledAsMemberFunction;
- } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ } else if (Style.isJavaScript() &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
tok::identifier))) {
@@ -256,13 +256,13 @@ private:
Left->Previous->is(TT_BinaryOperator))) {
// static_assert, if and while usually contain expressions.
Contexts.back().IsExpression = true;
- } else if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
+ } else if (Style.isJavaScript() && Left->Previous &&
(Left->Previous->is(Keywords.kw_function) ||
(Left->Previous->endsSequence(tok::identifier,
Keywords.kw_function)))) {
// function(...) or function f(...)
Contexts.back().IsExpression = false;
- } else if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
+ } else if (Style.isJavaScript() && Left->Previous &&
Left->Previous->is(TT_JsTypeColon)) {
// let x: (SomeType);
Contexts.back().IsExpression = false;
@@ -582,7 +582,7 @@ private:
Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
Left->setType(TT_AttributeSquare);
- } else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
+ } else if (Style.isJavaScript() && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_JsComputedPropertyName);
@@ -646,8 +646,7 @@ private:
ScopedContextCreator ContextCreator(*this, tok::l_square, BindingIncrease);
Contexts.back().IsExpression = true;
- if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
- Parent->is(TT_JsTypeColon))
+ if (Style.isJavaScript() && Parent && Parent->is(TT_JsTypeColon))
Contexts.back().IsExpression = false;
Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
@@ -774,7 +773,7 @@ private:
Contexts.back().ColonIsDictLiteral = true;
if (Left->is(BK_BracedInit))
Contexts.back().IsExpression = true;
- if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
+ if (Style.isJavaScript() && Left->Previous &&
Left->Previous->is(TT_JsTypeColon))
Contexts.back().IsExpression = false;
@@ -808,12 +807,11 @@ private:
Previous->is(tok::string_literal))
Previous->setType(TT_SelectorName);
}
- if (CurrentToken->is(tok::colon) ||
- Style.Language == FormatStyle::LK_JavaScript)
+ if (CurrentToken->is(tok::colon) || Style.isJavaScript())
Left->setType(TT_DictLiteral);
}
if (CurrentToken->is(tok::comma)) {
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
Left->setType(TT_DictLiteral);
++CommaCount;
}
@@ -879,7 +877,7 @@ private:
if (!Tok->Previous)
return false;
// Colons from ?: are handled in parseConditional().
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
if (Contexts.back().ColonIsForRangeExpr || // colon in for loop
(Contexts.size() == 1 && // switch/case labels
!Line.First->isOneOf(tok::kw_enum, tok::kw_case)) ||
@@ -979,8 +977,7 @@ private:
case tok::amp:
// | and & in declarations/type expressions represent union and
// intersection types, respectively.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- !Contexts.back().IsExpression)
+ if (Style.isJavaScript() && !Contexts.back().IsExpression)
Tok->setType(TT_JsTypeOperator);
break;
case tok::kw_if:
@@ -995,7 +992,7 @@ private:
}
break;
case tok::kw_for:
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
// x.for and {for: ...}
if ((Tok->Previous && Tok->Previous->is(tok::period)) ||
(Tok->Next && Tok->Next->is(tok::colon)))
@@ -1106,7 +1103,7 @@ private:
CurrentToken->Previous->setType(TT_OverloadedOperator);
break;
case tok::question:
- if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
+ if (Style.isJavaScript() && Tok->Next &&
Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
tok::r_brace)) {
// Question marks before semicolons, colons, etc. indicate optional
@@ -1119,7 +1116,7 @@ private:
// Declarations cannot be conditional expressions, this can only be part
// of a type declaration.
if (Line.MustBeDeclaration && !Contexts.back().IsExpression &&
- Style.Language == FormatStyle::LK_JavaScript)
+ Style.isJavaScript())
break;
if (Style.isCSharp()) {
// `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be
@@ -1252,7 +1249,7 @@ private:
if (!CurrentToken)
return Type;
- if (Style.Language == FormatStyle::LK_JavaScript && IsFirstToken) {
+ if (Style.isJavaScript() && IsFirstToken) {
// JavaScript files can contain shebang lines of the form:
// #!/usr/bin/env node
// Treat these like C++ #include directives.
@@ -1354,14 +1351,13 @@ public:
bool ImportStatement = false;
// import {...} from '...';
- if (Style.Language == FormatStyle::LK_JavaScript &&
- CurrentToken->is(Keywords.kw_import))
+ if (Style.isJavaScript() && CurrentToken->is(Keywords.kw_import))
ImportStatement = true;
while (CurrentToken) {
if (CurrentToken->is(tok::kw_virtual))
KeywordVirtualFound = true;
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
// export {...} from '...';
// An export followed by "from 'some string';" is a re-export from
// another module identified by a URI and is treated as a
@@ -1504,7 +1500,7 @@ private:
!Line.First->isOneOf(tok::kw_template, tok::kw_using, tok::kw_return) &&
// Type aliases use `type X = ...;` in TypeScript and can be exported
// using `export type ...`.
- !(Style.Language == FormatStyle::LK_JavaScript &&
+ !(Style.isJavaScript() &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
tok::identifier))) &&
@@ -1633,11 +1629,11 @@ private:
// The token type is already known.
return;
- if ((Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) &&
+ if ((Style.isJavaScript() || Style.isCSharp()) &&
Current.is(tok::exclaim)) {
if (Current.Previous) {
bool IsIdentifier =
- Style.Language == FormatStyle::LK_JavaScript
+ Style.isJavaScript()
? Keywords.IsJavaScriptIdentifier(
*Current.Previous, /* AcceptIdentifierName= */ true)
: Current.Previous->is(tok::identifier);
@@ -1679,7 +1675,7 @@ private:
Current.setType(TT_LambdaArrow);
} else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
Current.NestingLevel == 0 &&
- !Current.Previous->is(tok::kw_operator)) {
+ !Current.Previous->isOneOf(tok::kw_operator, tok::identifier)) {
// not auto operator->() -> xxx;
Current.setType(TT_TrailingReturnArrow);
} else if (Current.is(tok::arrow) && Current.Previous &&
@@ -1704,8 +1700,8 @@ private:
} else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
Current.setType(TT_UnaryOperator);
} else if (Current.is(tok::question)) {
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Line.MustBeDeclaration && !Contexts.back().IsExpression) {
+ if (Style.isJavaScript() && Line.MustBeDeclaration &&
+ !Contexts.back().IsExpression) {
// In JavaScript, `interface X { foo?(): bar; }` is an optional method
// on the interface, not a ternary expression.
Current.setType(TT_JsTypeOptionalQuestion);
@@ -1748,8 +1744,7 @@ private:
Current.setType(TT_FunctionAnnotationRParen);
}
}
- } else if (Current.is(tok::at) && Current.Next &&
- Style.Language != FormatStyle::LK_JavaScript &&
+ } else if (Current.is(tok::at) && Current.Next && !Style.isJavaScript() &&
Style.Language != FormatStyle::LK_Java) {
// In Java & JavaScript, "@..." is a decorator or annotation. In ObjC, it
// marks declarations and properties that need special formatting.
@@ -1796,7 +1791,7 @@ private:
// function declaration have been found.
Current.setType(TT_TrailingAnnotation);
} else if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ Style.isJavaScript()) &&
Current.Previous) {
if (Current.Previous->is(tok::at) &&
Current.isNot(Keywords.kw_interface)) {
@@ -1826,14 +1821,16 @@ private:
if (Tok.Previous->isOneOf(TT_LeadingJavaAnnotation, Keywords.kw_instanceof,
Keywords.kw_as))
return false;
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Tok.Previous->is(Keywords.kw_in))
+ if (Style.isJavaScript() && Tok.Previous->is(Keywords.kw_in))
return false;
// Skip "const" as it does not have an influence on whether this is a name.
FormatToken *PreviousNotConst = Tok.getPreviousNonComment();
- while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
- PreviousNotConst = PreviousNotConst->getPreviousNonComment();
+
+  // In JavaScript, "const" can behave like "let" or "var".
+ if (!Style.isJavaScript())
+ while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
+ PreviousNotConst = PreviousNotConst->getPreviousNonComment();
if (!PreviousNotConst)
return false;
@@ -1852,10 +1849,24 @@ private:
PreviousNotConst->is(TT_TypeDeclarationParen))
return true;
- return (!IsPPKeyword &&
- PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto)) ||
- PreviousNotConst->is(TT_PointerOrReference) ||
- PreviousNotConst->isSimpleTypeSpecifier();
+  // Whether this is a preprocessor keyword, e.g. #define.
+ if (IsPPKeyword)
+ return false;
+
+ // int a or auto a.
+ if (PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto))
+ return true;
+
+ // *a or &a or &&a.
+ if (PreviousNotConst->is(TT_PointerOrReference))
+ return true;
+
+ // MyClass a;
+ if (PreviousNotConst->isSimpleTypeSpecifier())
+ return true;
+
+  // "const a =" in JavaScript.
+ return (Style.isJavaScript() && PreviousNotConst->is(tok::kw_const));
}
/// Determine whether ')' is ending a cast.
@@ -2006,7 +2017,7 @@ private:
/// Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
bool InTemplateArgument) {
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
return TT_BinaryOperator;
// && in C# must be a binary operator.
@@ -2034,9 +2045,8 @@ private:
tok::comma, tok::semi, tok::kw_return, tok::colon,
tok::kw_co_return, tok::kw_co_await,
tok::kw_co_yield, tok::equal, tok::kw_delete,
- tok::kw_sizeof, tok::kw_throw) ||
- PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
- TT_UnaryOperator, TT_CastRParen))
+ tok::kw_sizeof, tok::kw_throw, TT_BinaryOperator,
+ TT_ConditionalExpr, TT_UnaryOperator, TT_CastRParen))
return TT_UnaryOperator;
if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
@@ -2174,8 +2184,8 @@ public:
int CurrentPrecedence = getCurrentPrecedence();
- if (Current && Current->is(TT_SelectorName) &&
- Precedence == CurrentPrecedence) {
+ if (Precedence == CurrentPrecedence && Current &&
+ Current->is(TT_SelectorName)) {
if (LatestOperator)
addFakeParenthesis(Start, prec::Level(Precedence));
Start = Current;
@@ -2251,19 +2261,17 @@ private:
return 0;
if (Current->is(TT_RangeBasedForLoopColon))
return prec::Comma;
- if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Current->is(Keywords.kw_instanceof))
return prec::Relational;
- if (Style.Language == FormatStyle::LK_JavaScript &&
+ if (Style.isJavaScript() &&
Current->isOneOf(Keywords.kw_in, Keywords.kw_as))
return prec::Relational;
if (Current->is(TT_BinaryOperator) || Current->is(tok::comma))
return Current->getPrecedence();
if (Current->isOneOf(tok::period, tok::arrow))
return PrecedenceArrowAndPeriod;
- if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements,
Keywords.kw_throws))
return 0;
@@ -2374,11 +2382,9 @@ static unsigned maxNestingDepth(const AnnotatedLine &Line) {
}
void TokenAnnotator::annotate(AnnotatedLine &Line) {
- for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
- E = Line.Children.end();
- I != E; ++I) {
- annotate(**I);
- }
+ for (auto &Child : Line.Children)
+ annotate(*Child);
+
AnnotatingParser Parser(Style, Line, Keywords);
Line.Type = Parser.parseLine();
@@ -2734,7 +2740,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 2;
if (Left.is(tok::comma) && Left.NestingLevel == 0)
return 3;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.isJavaScript()) {
if (Right.is(Keywords.kw_function) && Left.isNot(tok::comma))
return 100;
if (Left.is(TT_JsTypeColon))
@@ -3024,8 +3030,14 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
(Left.is(TT_AttributeParen) || Left.canBePointerOrReferenceQualifier()))
return true;
+ if (Left.Tok.isLiteral())
+ return true;
+ // for (auto a = 0, b = 0; const auto & c : {1, 2, 3})
+ if (Left.isTypeOrIdentifier() && Right.Next && Right.Next->Next &&
+ Right.Next->Next->is(TT_RangeBasedForLoopColon))
+ return getTokenPointerOrReferenceAlignment(Right) !=
+ FormatStyle::PAS_Left;
return (
- Left.Tok.isLiteral() ||
(!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
(getTokenPointerOrReferenceAlignment(Right) != FormatStyle::PAS_Left ||
(Line.IsMultiVariableDeclStmt &&
@@ -3044,18 +3056,32 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
Right.canBePointerOrReferenceQualifier())
return true;
- return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
- (Right.isOneOf(Keywords.kw_override, Keywords.kw_final) &&
- !Right.is(TT_StartOfName)) ||
- (Right.is(tok::l_brace) && Right.is(BK_Block)) ||
- (!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
- tok::l_paren) &&
- (getTokenPointerOrReferenceAlignment(Left) !=
- FormatStyle::PAS_Right &&
- !Line.IsMultiVariableDeclStmt) &&
- Left.Previous &&
- !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
- tok::l_square));
+ // & 1
+ if (Right.Tok.isLiteral())
+ return true;
+ // & /* comment
+ if (Right.is(TT_BlockComment))
+ return true;
+ // foo() -> const Bar * override/final
+ if (Right.isOneOf(Keywords.kw_override, Keywords.kw_final) &&
+ !Right.is(TT_StartOfName))
+ return true;
+ // & {
+ if (Right.is(tok::l_brace) && Right.is(BK_Block))
+ return true;
+ // for (auto a = 0, b = 0; const auto& c : {1, 2, 3})
+ if (Left.Previous && Left.Previous->isTypeOrIdentifier() && Right.Next &&
+ Right.Next->is(TT_RangeBasedForLoopColon))
+ return getTokenPointerOrReferenceAlignment(Left) !=
+ FormatStyle::PAS_Right;
+ return !Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
+ tok::l_paren) &&
+ (getTokenPointerOrReferenceAlignment(Left) !=
+ FormatStyle::PAS_Right &&
+ !Line.IsMultiVariableDeclStmt) &&
+ Left.Previous &&
+ !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
+ tok::l_square);
}
// Ensure right pointer alignment with ellipsis e.g. int *...P
if (Left.is(tok::ellipsis) && Left.Previous &&
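The two hunks above unfold a pair of dense boolean returns into guarded early returns and add a range-based for-loop case on each side of the pointer/reference token. A minimal illustration of the spacing being decided, using the exact loop from the hunk comments (the init-statement form needs C++20); which form clang-format produces depends on PointerAlignment:

    // PointerAlignment: Left
    for (auto a = 0, b = 0; const auto& c : {1, 2, 3}) {}
    // PointerAlignment: Right
    for (auto a = 0, b = 0; const auto &c : {1, 2, 3}) {}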
@@ -3146,8 +3172,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.Cpp11BracedListStyle ? Style.SpacesInParentheses : true;
if (Left.is(TT_BlockComment))
// No whitespace in x(/*foo=*/1), except for JavaScript.
- return Style.Language == FormatStyle::LK_JavaScript ||
- !Left.TokenText.endswith("=*/");
+ return Style.isJavaScript() || !Left.TokenText.endswith("=*/");
// Space between template and attribute.
// e.g. template <typename T> [[nodiscard]] ...
@@ -3225,7 +3250,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
if (Left.is(tok::period) || Right.is(tok::period))
return false;
- if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
+ // u#str, U#str, L#str, u8#str
+ // uR#str, UR#str, LR#str, u8R#str
+ if (Right.is(tok::hash) && Left.is(tok::identifier) &&
+ (Left.TokenText == "L" || Left.TokenText == "u" ||
+ Left.TokenText == "U" || Left.TokenText == "u8" ||
+ Left.TokenText == "LR" || Left.TokenText == "uR" ||
+ Left.TokenText == "UR" || Left.TokenText == "u8R"))
return false;
if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
Left.MatchingParen->Previous &&
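Previously only a lone "L" before '#' suppressed the space; the hunk above extends the exemption to every string-literal encoding prefix, raw or not. A hedged sketch of macro bodies that should now keep the prefix glued to the stringizing '#' (the macro names are illustrative; the prefix list u, U, L, u8 and their R variants is taken verbatim from the hunk):

    #define WIDE(str) L#str
    #define UTF8_RAW(str) u8R#str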
@@ -3396,7 +3427,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Keywords.kw_async, Keywords.kw_unsafe) &&
Right.is(tok::l_paren))
return true;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.isJavaScript()) {
if (Left.is(TT_FatArrow))
return true;
// for await ( ...
@@ -3694,11 +3725,18 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
if (Style.isCSharp()) {
+ if (Left.is(TT_FatArrow) && Right.is(tok::l_brace) &&
+ Style.BraceWrapping.AfterFunction)
+ return true;
if (Right.is(TT_CSharpNamedArgumentColon) ||
Left.is(TT_CSharpNamedArgumentColon))
return false;
if (Right.is(TT_CSharpGenericTypeConstraint))
return true;
+ if (Right.Next && Right.Next->is(TT_FatArrow) &&
+ (Right.is(tok::numeric_constant) ||
+ (Right.is(tok::identifier) && Right.TokenText == "_")))
+ return true;
// Break after C# [...] and before public/protected/private/internal.
if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
@@ -3710,7 +3748,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
Left.is(tok::r_square) && Right.is(tok::l_square))
return true;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
Left.Previous->is(tok::string_literal))
@@ -3800,15 +3838,13 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Style.JavaScriptWrapImports || Line.Type != LT_ImportStatement) {
const FormatToken *BeforeClosingBrace = nullptr;
if ((Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- (Style.Language == FormatStyle::LK_JavaScript &&
- Left.is(tok::l_paren))) &&
+ (Style.isJavaScript() && Left.is(tok::l_paren))) &&
Left.isNot(BK_Block) && Left.MatchingParen)
BeforeClosingBrace = Left.MatchingParen->Previous;
else if (Right.MatchingParen &&
(Right.MatchingParen->isOneOf(tok::l_brace,
TT_ArrayInitializerLSquare) ||
- (Style.Language == FormatStyle::LK_JavaScript &&
- Right.MatchingParen->is(tok::l_paren))))
+ (Style.isJavaScript() && Right.MatchingParen->is(tok::l_paren))))
BeforeClosingBrace = &Left;
if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) ||
BeforeClosingBrace->isTrailingComment()))
@@ -3927,8 +3963,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
// Put multiple Java annotations on a new line.
- if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Left.is(TT_LeadingJavaAnnotation) &&
Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
(Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations))
@@ -4071,7 +4106,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
Keywords.kw_implements))
return true;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.isJavaScript()) {
const FormatToken *NonComment = Right.getPreviousNonComment();
if (NonComment &&
NonComment->isOneOf(
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index d099cfee9dea..f652a4e7088f 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -62,7 +62,7 @@ public:
Indent = Line.Level * IndentWidth + AdditionalIndent;
} else {
IndentForLevel.resize(Line.Level + 1);
- Indent = getIndent(IndentForLevel, Line.Level);
+ Indent = getIndent(Line.Level);
}
if (static_cast<int>(Indent) + Offset >= 0)
Indent += Offset;
@@ -97,8 +97,8 @@ private:
/// For example, 'public:' labels in classes are offset by 1 or 2
/// characters to the left from their level.
int getIndentOffset(const FormatToken &RootToken) {
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp())
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp())
return 0;
if (RootToken.isAccessSpecifier(false) ||
RootToken.isObjCAccessSpecifier() ||
@@ -118,12 +118,12 @@ private:
/// \p IndentForLevel must contain the indent for the level \c l
/// at \p IndentForLevel[l], or a value < 0 if the indent for
/// that level is unknown.
- unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level) {
+ unsigned getIndent(unsigned Level) const {
if (IndentForLevel[Level] != -1)
return IndentForLevel[Level];
if (Level == 0)
return 0;
- return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth;
+ return getIndent(Level - 1) + Style.IndentWidth;
}
const FormatStyle &Style;
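getIndent drops its ArrayRef parameter and becomes a const member that reads the IndentForLevel field directly. A standalone restatement of the fallback it implements, assuming -1 marks an unknown level as the doc comment above states (a hedged sketch, not the member itself):

    #include <vector>

    unsigned getIndentSketch(const std::vector<int> &IndentForLevel,
                             unsigned Level, unsigned IndentWidth) {
      if (IndentForLevel[Level] != -1)
        return IndentForLevel[Level]; // indent for this level is known
      if (Level == 0)
        return 0; // the outermost level starts at column 0
      // Unknown: derive from the next-outer level plus one indent step.
      return getIndentSketch(IndentForLevel, Level - 1, IndentWidth) +
             IndentWidth;
    }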
@@ -393,11 +393,24 @@ private:
// Try to merge a block with left brace wrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
- return !Style.BraceWrapping.AfterFunction ||
- (I[1]->First->is(tok::r_brace) &&
- !Style.BraceWrapping.SplitEmptyRecord)
- ? tryMergeSimpleBlock(I, E, Limit)
- : 0;
+ const FormatToken *Tok = TheLine->First;
+ bool ShouldMerge = false;
+ if (Tok->is(tok::kw_typedef)) {
+ Tok = Tok->getNextNonComment();
+ assert(Tok);
+ }
+ if (Tok->isOneOf(tok::kw_class, tok::kw_struct)) {
+ ShouldMerge = !Style.BraceWrapping.AfterClass ||
+ (I[1]->First->is(tok::r_brace) &&
+ !Style.BraceWrapping.SplitEmptyRecord);
+ } else if (Tok->is(tok::kw_enum)) {
+ ShouldMerge = Style.AllowShortEnumsOnASingleLine;
+ } else {
+ ShouldMerge = !Style.BraceWrapping.AfterFunction ||
+ (I[1]->First->is(tok::r_brace) &&
+ !Style.BraceWrapping.SplitEmptyFunction);
+ }
+ return ShouldMerge ? tryMergeSimpleBlock(I, E, Limit) : 0;
}
// Try to merge a function block with left brace wrapped
if (I[1]->First->is(TT_FunctionLBrace) &&
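The rewrite above replaces a single ternary with per-construct logic: records now consult BraceWrapping.AfterClass and SplitEmptyRecord, enums consult AllowShortEnumsOnASingleLine, and everything else keeps the AfterFunction/SplitEmptyFunction gate, with a leading "typedef" skipped before classifying. Hedged inputs exercising each new branch (whether each merges onto one line depends on those style options):

    typedef struct { int x; } S; // "typedef" is skipped when classifying
    class C {};                  // gated on AfterClass / SplitEmptyRecord
    enum E { A, B };             // gated on AllowShortEnumsOnASingleLine
    void f() {}                  // gated on AfterFunction / SplitEmptyFunction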
@@ -584,6 +597,9 @@ private:
Keywords.kw___except)) {
if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
return 0;
+ if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Empty &&
+ !I[1]->First->is(tok::r_brace))
+ return 0;
// Don't merge when we can't, except the case when
// the control statement block is empty
if (!Style.AllowShortIfStatementsOnASingleLine &&
@@ -1015,9 +1031,9 @@ private:
QueueType Queue;
// Insert start element into queue.
- StateNode *Node =
+ StateNode *RootNode =
new (Allocator.Allocate()) StateNode(InitialState, false, nullptr);
- Queue.push(QueueItem(OrderedPenalty(0, Count), Node));
+ Queue.push(QueueItem(OrderedPenalty(0, Count), RootNode));
++Count;
unsigned Penalty = 0;
@@ -1044,9 +1060,9 @@ private:
FormatDecision LastFormat = Node->State.NextToken->getDecision();
if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
- addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue);
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/false, Count, Queue);
if (LastFormat == FD_Unformatted || LastFormat == FD_Break)
- addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue);
+ addNextStateToQueue(Penalty, Node, /*NewLine=*/true, Count, Queue);
}
if (Queue.empty()) {
@@ -1072,7 +1088,7 @@ private:
/// Assume the current state is \p PreviousNode and has been reached with a
/// penalty of \p Penalty. Insert a line break if \p NewLine is \c true.
void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode,
- bool NewLine, unsigned *Count, QueueType *Queue) {
+ bool NewLine, unsigned &Count, QueueType &Queue) {
if (NewLine && !Indenter->canBreak(PreviousNode->State))
return;
if (!NewLine && Indenter->mustBreak(PreviousNode->State))
@@ -1085,8 +1101,8 @@ private:
Penalty += Indenter->addTokenToState(Node->State, NewLine, true);
- Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node));
- ++(*Count);
+ Queue.push(QueueItem(OrderedPenalty(Penalty, Count), Node));
+ ++Count;
}
/// Applies the best formatting by reconstructing the path in the
@@ -1184,8 +1200,7 @@ unsigned UnwrappedLineFormatter::format(
bool FitsIntoOneLine =
TheLine.Last->TotalLength + Indent <= ColumnLimit ||
(TheLine.Type == LT_ImportStatement &&
- (Style.Language != FormatStyle::LK_JavaScript ||
- !Style.JavaScriptWrapImports)) ||
+ (!Style.isJavaScript() || !Style.JavaScriptWrapImports)) ||
(Style.isCSharp() &&
TheLine.InPPDirective); // don't split #regions in C#
if (Style.ColumnLimit == 0)
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index 5b9fe267aae6..b6e55aab708f 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -366,8 +366,7 @@ void UnwrappedLineParser::parse() {
void UnwrappedLineParser::parseFile() {
// The top-level context in a file always has declarations, except for pre-
// processor directives and JavaScript files.
- bool MustBeDeclaration =
- !Line->InPPDirective && Style.Language != FormatStyle::LK_JavaScript;
+ bool MustBeDeclaration = !Line->InPPDirective && !Style.isJavaScript();
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
MustBeDeclaration);
if (Style.Language == FormatStyle::LK_TextProto)
@@ -478,8 +477,7 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
LLVM_FALLTHROUGH;
}
case tok::kw_case:
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Line->MustBeDeclaration) {
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// A 'case: string' style field declaration.
parseStructuralElement();
break;
@@ -528,7 +526,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
switch (Tok->Tok.getKind()) {
case tok::l_brace:
- if (Style.Language == FormatStyle::LK_JavaScript && PrevTok) {
+ if (Style.isJavaScript() && PrevTok) {
if (PrevTok->isOneOf(tok::colon, tok::less))
// A ':' indicates this code is in a type, or a braced list
// following a label in an object literal ({a: {b: 1}}).
@@ -581,7 +579,7 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// FIXME: Some of these do not apply to JS, e.g. "} {" can never be a
// braced list in JS.
ProbablyBracedList =
- (Style.Language == FormatStyle::LK_JavaScript &&
+ (Style.isJavaScript() &&
NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
Keywords.kw_as)) ||
(Style.isCpp() && NextTok->is(tok::l_paren)) ||
@@ -791,7 +789,7 @@ void UnwrappedLineParser::parseChildBlock() {
FormatTok->setBlockKind(BK_Block);
nextToken();
{
- bool SkipIndent = (Style.Language == FormatStyle::LK_JavaScript &&
+ bool SkipIndent = (Style.isJavaScript() &&
(isGoogScope(*Line) || isIIFE(*Line, Keywords)));
ScopedLineState LineState(*this);
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
@@ -1140,7 +1138,6 @@ void UnwrappedLineParser::parseModuleImport() {
}
addUnwrappedLine();
- return;
}
// readTokenWithJavaScriptASI reads the next token and terminates the current
@@ -1222,39 +1219,39 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
case tok::kw_public:
case tok::kw_protected:
case tok::kw_private:
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp())
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp())
nextToken();
else
parseAccessSpecifier();
return;
case tok::kw_if:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// field/method declaration.
break;
parseIfThenElse();
return;
case tok::kw_for:
case tok::kw_while:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// field/method declaration.
break;
parseForOrWhileLoop();
return;
case tok::kw_do:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// field/method declaration.
break;
parseDoWhile();
return;
case tok::kw_switch:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// 'switch: string' field declaration.
break;
parseSwitch();
return;
case tok::kw_default:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// 'default: string' field declaration.
break;
nextToken();
@@ -1265,14 +1262,14 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// e.g. "default void f() {}" in a Java interface.
break;
case tok::kw_case:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// 'case: string' field declaration.
break;
parseCaseLabel();
return;
case tok::kw_try:
case tok::kw___try:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration)
// field/method declaration.
break;
parseTryCatch();
@@ -1282,24 +1279,25 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
if (FormatTok->Tok.is(tok::string_literal)) {
nextToken();
if (FormatTok->Tok.is(tok::l_brace)) {
- if (!Style.IndentExternBlock) {
- if (Style.BraceWrapping.AfterExternBlock) {
- addUnwrappedLine();
- }
- unsigned AddLevels = Style.BraceWrapping.AfterExternBlock ? 1u : 0u;
- parseBlock(/*MustBeDeclaration=*/true, AddLevels);
- } else {
- unsigned AddLevels =
- Style.IndentExternBlock == FormatStyle::IEBS_Indent ? 1u : 0u;
- parseBlock(/*MustBeDeclaration=*/true, AddLevels);
- }
+ if (Style.BraceWrapping.AfterExternBlock)
+ addUnwrappedLine();
+ // Either we indent, or, for backwards compatibility, we follow the
+ // AfterExternBlock style.
+ unsigned AddLevels =
+ (Style.IndentExternBlock == FormatStyle::IEBS_Indent) ||
+ (Style.BraceWrapping.AfterExternBlock &&
+ Style.IndentExternBlock ==
+ FormatStyle::IEBS_AfterExternBlock)
+ ? 1u
+ : 0u;
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels);
addUnwrappedLine();
return;
}
}
break;
case tok::kw_export:
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
parseJavaScriptEs6ImportExport();
return;
}
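The extern-block branch collapses two duplicated paths into one AddLevels computation: the body is indented either when IndentExternBlock is IEBS_Indent or, for backwards compatibility, when it is IEBS_AfterExternBlock and BraceWrapping.AfterExternBlock is set. A hedged illustration of the two outcomes:

    extern "C" {
    int g(int);   // AddLevels == 0: body stays at the outer level
    }

    extern "C" {
      int h(int); // AddLevels == 1: body indented one level
    }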
@@ -1325,7 +1323,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
return;
}
if (FormatTok->is(Keywords.kw_import)) {
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
parseJavaScriptEs6ImportExport();
return;
}
@@ -1437,10 +1435,10 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
break;
case tok::kw_concept:
parseConcept();
- break;
+ return;
case tok::kw_requires:
parseRequires();
- break;
+ return;
case tok::kw_enum:
// Ignore if this is part of "template <enum ...".
if (Previous && Previous->is(tok::less)) {
@@ -1479,7 +1477,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
if (Style.Language == FormatStyle::LK_Java && FormatTok &&
FormatTok->is(tok::kw_class))
nextToken();
- if (Style.Language == FormatStyle::LK_JavaScript && FormatTok &&
+ if (Style.isJavaScript() && FormatTok &&
FormatTok->Tok.getIdentifierInfo())
// JavaScript only has pseudo keywords; all keywords are allowed to
// appear in "IdentifierName" positions. See http://es5.github.io/#x7.6
@@ -1536,8 +1534,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// element continues.
break;
case tok::kw_try:
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Line->MustBeDeclaration) {
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
nextToken();
break;
@@ -1564,17 +1561,15 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// expressions (functions that are not on their own line) must not create
// a new unwrapped line, so they are special cased below.
size_t TokenCount = Line->Tokens.size();
- if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->is(Keywords.kw_function) &&
+ if (Style.isJavaScript() && FormatTok->is(Keywords.kw_function) &&
(TokenCount > 1 || (TokenCount == 1 && !Line->Tokens.front().Tok->is(
Keywords.kw_async)))) {
tryToParseJSFunction();
break;
}
- if ((Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Java) &&
+ if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) &&
FormatTok->is(Keywords.kw_interface)) {
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
// In JavaScript/TypeScript, "interface" can be used as a standalone
// identifier, e.g. in `var interface = 1;`. If "interface" is
// followed by another identifier, it is very likely to be an actual
@@ -1610,7 +1605,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// JS doesn't have macros, and within classes colons indicate fields, not
// labels.
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
break;
TokenCount = Line->Tokens.size();
@@ -1641,19 +1636,9 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
break;
}
case tok::equal:
- // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
- // TT_FatArrow. They always start an expression or a child block if
- // followed by a curly brace.
- if (FormatTok->is(TT_FatArrow)) {
- nextToken();
- if (FormatTok->is(tok::l_brace)) {
- // C# may break after => if the next character is a newline.
- if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
- // calling `addUnwrappedLine()` here causes odd parsing errors.
- FormatTok->MustBreakBefore = true;
- }
- parseChildBlock();
- }
+ if ((Style.isJavaScript() || Style.isCSharp()) &&
+ FormatTok->is(TT_FatArrow)) {
+ tryToParseChildBlock();
break;
}
@@ -1729,7 +1714,7 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
// Try to parse the property accessor:
// https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/classes-and-structs/properties
Tokens->setPosition(StoredPosition);
- if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction == true)
+ if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
nextToken();
do {
@@ -1944,6 +1929,19 @@ bool UnwrappedLineParser::tryToParseBracedList() {
return true;
}
+bool UnwrappedLineParser::tryToParseChildBlock() {
+ assert(Style.isJavaScript() || Style.isCSharp());
+ assert(FormatTok->is(TT_FatArrow));
+ // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType TT_FatArrow.
+ // They always start an expression or a child block if followed by a curly
+ // brace.
+ nextToken();
+ if (FormatTok->isNot(tok::l_brace))
+ return false;
+ parseChildBlock();
+ return true;
+}
+
bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
bool IsEnum,
tok::TokenKind ClosingBraceKind) {
@@ -1952,38 +1950,15 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
// FIXME: Once we have an expression parser in the UnwrappedLineParser,
// replace this by using parseAssignmentExpression() inside.
do {
- if (Style.isCSharp()) {
- // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
- // TT_FatArrow. They always start an expression or a child block if
- // followed by a curly brace.
- if (FormatTok->is(TT_FatArrow)) {
- nextToken();
- if (FormatTok->is(tok::l_brace)) {
- // C# may break after => if the next character is a newline.
- if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
- // calling `addUnwrappedLine()` here causes odd parsing errors.
- FormatTok->MustBreakBefore = true;
- }
- parseChildBlock();
- continue;
- }
- }
- }
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isCSharp() && FormatTok->is(TT_FatArrow) &&
+ tryToParseChildBlock())
+ continue;
+ if (Style.isJavaScript()) {
if (FormatTok->is(Keywords.kw_function) ||
FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)) {
tryToParseJSFunction();
continue;
}
- if (FormatTok->is(TT_FatArrow)) {
- nextToken();
- // Fat arrows can be followed by simple expressions or by child blocks
- // in curly braces.
- if (FormatTok->is(tok::l_brace)) {
- parseChildBlock();
- continue;
- }
- }
if (FormatTok->is(tok::l_brace)) {
// Could be a method inside of a braced list `{a() { return 1; }}`.
if (tryToParseBracedList())
@@ -1998,12 +1973,6 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
return !HasError;
}
switch (FormatTok->Tok.getKind()) {
- case tok::caret:
- nextToken();
- if (FormatTok->is(tok::l_brace)) {
- parseChildBlock();
- }
- break;
case tok::l_square:
if (Style.isCSharp())
parseSquare();
@@ -2014,7 +1983,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
parseParens();
// JavaScript can just have free-standing methods and getters/setters in
// object literals. Detect them by a "{" following ")".
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
if (FormatTok->is(tok::l_brace))
parseChildBlock();
break;
@@ -2041,7 +2010,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
// lists (in so-called TypeMemberLists). Thus, the semicolon cannot be
// used for error recovery if we have otherwise determined that this is
// a braced list.
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
nextToken();
break;
}
@@ -2095,18 +2064,18 @@ void UnwrappedLineParser::parseParens() {
break;
case tok::equal:
if (Style.isCSharp() && FormatTok->is(TT_FatArrow))
- parseStructuralElement();
+ tryToParseChildBlock();
else
nextToken();
break;
case tok::kw_class:
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
parseRecord(/*ParseAsExpr=*/true);
else
nextToken();
break;
case tok::identifier:
- if (Style.Language == FormatStyle::LK_JavaScript &&
+ if (Style.isJavaScript() &&
(FormatTok->is(Keywords.kw_function) ||
FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)))
tryToParseJSFunction();
@@ -2160,15 +2129,22 @@ void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
}
void UnwrappedLineParser::parseIfThenElse() {
+ auto HandleAttributes = [this]() {
+ // Handle AttributeMacro, e.g. `if (x) UNLIKELY`.
+ if (FormatTok->is(TT_AttributeMacro))
+ nextToken();
+ // Handle [[likely]] / [[unlikely]] attributes.
+ if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
+ parseSquare();
+ };
+
assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
nextToken();
if (FormatTok->Tok.isOneOf(tok::kw_constexpr, tok::identifier))
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
- // handle [[likely]] / [[unlikely]]
- if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
- parseSquare();
+ HandleAttributes();
bool NeedsUnwrappedLine = false;
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
@@ -2185,9 +2161,7 @@ void UnwrappedLineParser::parseIfThenElse() {
}
if (FormatTok->Tok.is(tok::kw_else)) {
nextToken();
- // handle [[likely]] / [[unlikely]]
- if (FormatTok->Tok.is(tok::l_square) && tryToParseSimpleAttribute())
- parseSquare();
+ HandleAttributes();
if (FormatTok->Tok.is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
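HandleAttributes centralizes logic that was previously duplicated across the then- and else-branches: skip an AttributeMacro token, then parse a [[likely]]/[[unlikely]] attribute square. A small C++20 input exercising both paths; UNLIKELY stands in for an assumed macro registered through the AttributeMacros style option:

    #define UNLIKELY [[unlikely]] // assumed AttributeMacros entry

    void f(bool x) {
      if (x) [[likely]] {
        // then-branch
      } else [[unlikely]] {
        // else-branch
      }
      if (!x) UNLIKELY
        return;
    }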
@@ -2272,8 +2246,7 @@ void UnwrappedLineParser::parseTryCatch() {
nextToken();
if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
tok::kw___finally) ||
- ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
FormatTok->is(Keywords.kw_finally)) ||
(FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) ||
FormatTok->Tok.isObjCAtKeyword(tok::objc_finally))))
@@ -2396,8 +2369,7 @@ void UnwrappedLineParser::parseForOrWhileLoop() {
"'for', 'while' or foreach macro expected");
nextToken();
// JS' for await ( ...
- if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->is(Keywords.kw_await))
+ if (Style.isJavaScript() && FormatTok->is(Keywords.kw_await))
nextToken();
if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
nextToken();
@@ -2643,8 +2615,7 @@ bool UnwrappedLineParser::parseEnum() {
// In TypeScript, "enum" can also be used as a property name, e.g. in
// interface declarations. An "enum" keyword followed by a colon would be a
// syntax error, so assume it is just an identifier.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->isOneOf(tok::colon, tok::question))
+ if (Style.isJavaScript() && FormatTok->isOneOf(tok::colon, tok::question))
return false;
// In protobuf, "enum" can be used as a field name.
@@ -2716,8 +2687,8 @@ bool UnwrappedLineParser::parseStructLike() {
// record declaration or definition can start a structural element.
parseRecord();
// This does not apply to Java, JavaScript and C#.
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp()) {
if (FormatTok->is(tok::semi))
nextToken();
addUnwrappedLine();
@@ -2846,10 +2817,9 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
tok::kw___attribute, tok::kw___declspec,
tok::kw_alignas, tok::l_square, tok::r_square) ||
- ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
FormatTok->isOneOf(tok::period, tok::comma))) {
- if (Style.Language == FormatStyle::LK_JavaScript &&
+ if (Style.isJavaScript() &&
FormatTok->isOneOf(Keywords.kw_extends, Keywords.kw_implements)) {
// JavaScript/TypeScript supports inline object types in
// extends/implements positions:
@@ -3323,7 +3293,7 @@ void UnwrappedLineParser::nextToken(int LevelDifference) {
flushComments(isOnNewLine(*FormatTok));
pushToken(FormatTok);
FormatToken *Previous = FormatTok;
- if (Style.Language != FormatStyle::LK_JavaScript)
+ if (!Style.isJavaScript())
readToken(LevelDifference);
else
readTokenWithJavaScriptASI();
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index b4c082654597..0c79723d50fc 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -138,6 +138,7 @@ private:
// https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/keywords/where-generic-type-constraint
void parseCSharpGenericTypeConstraint();
bool tryToParseLambda();
+ bool tryToParseChildBlock();
bool tryToParseLambdaIntroducer();
bool tryToParsePropertyAccessor();
void tryToParseJSFunction();
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index fae8a1c3fdc6..96a66da0f82b 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -1282,9 +1282,12 @@ void WhitespaceManager::generateChanges() {
C.EscapedNewlineColumn);
else
appendNewlineText(ReplacementText, C.NewlinesBefore);
+ // FIXME: This assert should hold if we computed the column correctly.
+ // assert((int)C.StartOfTokenColumn >= C.Spaces);
appendIndentText(
ReplacementText, C.Tok->IndentLevel, std::max(0, C.Spaces),
- C.StartOfTokenColumn - std::max(0, C.Spaces), C.IsAligned);
+ std::max((int)C.StartOfTokenColumn, C.Spaces) - std::max(0, C.Spaces),
+ C.IsAligned);
ReplacementText.append(C.CurrentLinePrefix);
storeReplacement(C.OriginalWhitespaceRange, ReplacementText);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index 1432607204bd..31e7ea3d243d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -1154,12 +1154,12 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// Remove any macro definitions that are explicitly ignored by the module.
// They aren't supposed to affect how the module is built anyway.
HeaderSearchOptions &HSOpts = Invocation->getHeaderSearchOpts();
- llvm::erase_if(
- PPOpts.Macros, [&HSOpts](const std::pair<std::string, bool> &def) {
- StringRef MacroDef = def.first;
- return HSOpts.ModulesIgnoreMacros.count(
- llvm::CachedHashString(MacroDef.split('=').first)) > 0;
- });
+ llvm::erase_if(PPOpts.Macros,
+ [&HSOpts](const std::pair<std::string, bool> &def) {
+ StringRef MacroDef = def.first;
+ return HSOpts.ModulesIgnoreMacros.contains(
+ llvm::CachedHashString(MacroDef.split('=').first));
+ });
// If the original compiler invocation had -fmodule-name, pass it through.
Invocation->getLangOpts()->ModuleName =
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index c104a6f40e20..b71addd84bfd 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -770,9 +770,7 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
static void getAllNoBuiltinFuncValues(ArgList &Args,
std::vector<std::string> &Funcs) {
std::vector<std::string> Values = Args.getAllArgValues(OPT_fno_builtin_);
- auto BuiltinEnd = llvm::partition(Values, [](const std::string FuncName) {
- return Builtin::Context::isBuiltinFunc(FuncName);
- });
+ auto BuiltinEnd = llvm::partition(Values, Builtin::Context::isBuiltinFunc);
Funcs.insert(Funcs.end(), Values.begin(), BuiltinEnd);
}
@@ -1285,7 +1283,7 @@ static std::string serializeXRayInstrumentationBundle(const XRayInstrSet &S) {
std::string Buffer;
llvm::raw_string_ostream OS(Buffer);
llvm::interleave(BundleParts, OS, [&OS](StringRef Part) { OS << Part; }, ",");
- return OS.str();
+ return Buffer;
}
// Set the profile kind using fprofile-instrument-use-path.
@@ -4123,6 +4121,13 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
{std::string(Split.first), std::string(Split.second)});
}
+ // Error if -mvscale-min is unbounded.
+ if (Arg *A = Args.getLastArg(options::OPT_mvscale_min_EQ)) {
+ unsigned VScaleMin;
+ if (StringRef(A->getValue()).getAsInteger(10, VScaleMin) || VScaleMin == 0)
+ Diags.Report(diag::err_cc1_unbounded_vscale_min);
+ }
+
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -4513,7 +4518,7 @@ bool CompilerInvocation::CreateFromArgsImpl(
// Store the command-line for use in the CodeView backend.
Res.getCodeGenOpts().Argv0 = Argv0;
- Res.getCodeGenOpts().CommandLineArgs = CommandLineArgs;
+ append_range(Res.getCodeGenOpts().CommandLineArgs, CommandLineArgs);
FixupInvocation(Res, Diags, Args, DashX);
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index 0c153446142e..629f99110661 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -500,8 +500,12 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
- if (LangOpts.CUDA && !LangOpts.HIP)
- Builder.defineMacro("__CUDA__");
+ if (LangOpts.CUDA) {
+ if (LangOpts.GPURelocatableDeviceCode)
+ Builder.defineMacro("__CLANG_RDC__");
+ if (!LangOpts.HIP)
+ Builder.defineMacro("__CUDA__");
+ }
if (LangOpts.HIP) {
Builder.defineMacro("__HIP__");
Builder.defineMacro("__HIPCC__");
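The preprocessor now advertises relocatable-device-code builds: per this hunk, __CLANG_RDC__ is defined whenever LangOpts.CUDA and GPURelocatableDeviceCode are set (which the driver enables with -fgpu-rdc), independent of the HIP check that gates __CUDA__. A hedged usage sketch:

    #if defined(__CUDA__) && defined(__CLANG_RDC__)
    // Path specific to separate (rdc) device compilation.
    #endif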
diff --git a/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp b/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp
index ea737e6891bf..2d5145d0c54c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp
@@ -133,5 +133,5 @@ std::string TestModuleFileExtension::str() const {
llvm::raw_string_ostream OS(Buffer);
OS << BlockName << ":" << MajorVersion << ":" << MinorVersion << ":" << Hashed
<< ":" << UserInfo;
- return OS.str();
+ return Buffer;
}
diff --git a/contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h b/contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h
new file mode 100644
index 000000000000..17699d8d11dd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h
@@ -0,0 +1,184 @@
+/*===---- arm_neon_sve_bridge.h - ARM NEON SVE Bridge intrinsics -----------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_NEON_SVE_BRIDGE_H
+#define __ARM_NEON_SVE_BRIDGE_H
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Function attributes */
+#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
+#define __aio \
+ static __inline__ \
+ __attribute__((__always_inline__, __nodebug__, __overloadable__))
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8)))
+svint8_t svset_neonq(svint8_t, int8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16)))
+svint16_t svset_neonq(svint16_t, int16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32)))
+svint32_t svset_neonq(svint32_t, int32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64)))
+svint64_t svset_neonq(svint64_t, int64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8)))
+svuint8_t svset_neonq(svuint8_t, uint8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16)))
+svuint16_t svset_neonq(svuint16_t, uint16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32)))
+svuint32_t svset_neonq(svuint32_t, uint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64)))
+svuint64_t svset_neonq(svuint64_t, uint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16)))
+svfloat16_t svset_neonq(svfloat16_t, float16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32)))
+svfloat32_t svset_neonq(svfloat32_t, float32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64)))
+svfloat64_t svset_neonq(svfloat64_t, float64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8)))
+svint8_t svset_neonq_s8(svint8_t, int8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16)))
+svint16_t svset_neonq_s16(svint16_t, int16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32)))
+svint32_t svset_neonq_s32(svint32_t, int32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64)))
+svint64_t svset_neonq_s64(svint64_t, int64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8)))
+svuint8_t svset_neonq_u8(svuint8_t, uint8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16)))
+svuint16_t svset_neonq_u16(svuint16_t, uint16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32)))
+svuint32_t svset_neonq_u32(svuint32_t, uint32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64)))
+svuint64_t svset_neonq_u64(svuint64_t, uint64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16)))
+svfloat16_t svset_neonq_f16(svfloat16_t, float16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32)))
+svfloat32_t svset_neonq_f32(svfloat32_t, float32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64)))
+svfloat64_t svset_neonq_f64(svfloat64_t, float64x2_t);
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8)))
+int8x16_t svget_neonq(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16)))
+int16x8_t svget_neonq(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32)))
+int32x4_t svget_neonq(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64)))
+int64x2_t svget_neonq(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8)))
+uint8x16_t svget_neonq(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16)))
+uint16x8_t svget_neonq(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32)))
+uint32x4_t svget_neonq(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64)))
+uint64x2_t svget_neonq(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16)))
+float16x8_t svget_neonq(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32)))
+float32x4_t svget_neonq(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64)))
+float64x2_t svget_neonq(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8)))
+int8x16_t svget_neonq_s8(svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16)))
+int16x8_t svget_neonq_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32)))
+int32x4_t svget_neonq_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64)))
+int64x2_t svget_neonq_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8)))
+uint8x16_t svget_neonq_u8(svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16)))
+uint16x8_t svget_neonq_u16(svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32)))
+uint32x4_t svget_neonq_u32(svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64)))
+uint64x2_t svget_neonq_u64(svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16)))
+float16x8_t svget_neonq_f16(svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32)))
+float32x4_t svget_neonq_f32(svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64)))
+float64x2_t svget_neonq_f64(svfloat64_t);
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8)))
+svint8_t svdup_neonq(int8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16)))
+svint16_t svdup_neonq(int16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32)))
+svint32_t svdup_neonq(int32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64)))
+svint64_t svdup_neonq(int64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8)))
+svuint8_t svdup_neonq(uint8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16)))
+svuint16_t svdup_neonq(uint16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32)))
+svuint32_t svdup_neonq(uint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64)))
+svuint64_t svdup_neonq(uint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16)))
+svfloat16_t svdup_neonq(float16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32)))
+svfloat32_t svdup_neonq(float32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64)))
+svfloat64_t svdup_neonq(float64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8)))
+svint8_t svdup_neonq_s8(int8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16)))
+svint16_t svdup_neonq_s16(int16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32)))
+svint32_t svdup_neonq_s32(int32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64)))
+svint64_t svdup_neonq_s64(int64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8)))
+svuint8_t svdup_neonq_u8(uint8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16)))
+svuint16_t svdup_neonq_u16(uint16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32)))
+svuint32_t svdup_neonq_u32(uint32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64)))
+svuint64_t svdup_neonq_u64(uint64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16)))
+svfloat16_t svdup_neonq_f16(float16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32)))
+svfloat32_t svdup_neonq_f32(float32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64)))
+svfloat64_t svdup_neonq_f64(float64x2_t);
+
+#if defined(__ARM_FEATURE_SVE_BF16)
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16)))
+svbfloat16_t svset_neonq(svbfloat16_t, bfloat16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16)))
+svbfloat16_t svset_neonq_bf16(svbfloat16_t, bfloat16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16)))
+bfloat16x8_t svget_neonq(svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16)))
+bfloat16x8_t svget_neonq_bf16(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16)))
+svbfloat16_t svdup_neonq(bfloat16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16)))
+svbfloat16_t svdup_neonq_bf16(bfloat16x8_t);
+#endif // defined(__ARM_FEATURE_SVE_BF16)
+
+#undef __ai
+#undef __aio
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //__ARM_NEON_SVE_BRIDGE_H
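The new arm_neon_sve_bridge.h header declares three intrinsic families, each as an overloaded form plus typed _s8.._f64 variants aliased to builtins: svset_neonq (insert a NEON vector into the low 128 bits of an SVE vector), svget_neonq (extract it), and svdup_neonq (broadcast it). A hedged usage sketch with signatures taken from the header, assuming an AArch64 target with SVE enabled (e.g. -march=armv8-a+sve):

    #include <arm_neon_sve_bridge.h>

    svint32_t roundtrip(svint32_t acc, int32x4_t v) {
      svint32_t s = svset_neonq_s32(acc, v); // NEON -> low 128 bits of SVE
      int32x4_t lo = svget_neonq_s32(s);     // low 128 bits of SVE -> NEON
      return svdup_neonq_s32(lo);            // broadcast quadword across SVE
    }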
diff --git a/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h b/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h
index cdffd93bb859..2642f3c8428d 100644
--- a/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h
+++ b/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h
@@ -8003,17 +8003,6 @@
#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
#endif /* __HEXAGON_ARCH___ >= 62 */
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
- Assembly Syntax: Vd32=vmem(Rt32):nt
- C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
- Instruction Type: MAPPING
- Execution Slots: SLOT0123
- ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
#if __HEXAGON_ARCH__ >= 65
/* ==========================================================================
Assembly Syntax: Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
diff --git a/contrib/llvm-project/clang/lib/Headers/hexagon_types.h b/contrib/llvm-project/clang/lib/Headers/hexagon_types.h
index 6958809418d8..029727cc4817 100644
--- a/contrib/llvm-project/clang/lib/Headers/hexagon_types.h
+++ b/contrib/llvm-project/clang/lib/Headers/hexagon_types.h
@@ -1177,37 +1177,6 @@ private:
#endif /* __cplusplus */
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
- // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
- // types are 16 bytes and 32 bytes for pairs.
- typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
- __attribute__((aligned(128)));
-
- typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
- __attribute__((aligned(128)));
-
- typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
- __attribute__((aligned(128)));
-
- typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
- __attribute__((aligned(256)));
-
- typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
- __attribute__((aligned(4)));
-
- typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
- __attribute__((aligned(4)));
-
- #define Q6S_VectorPredPair HEXAGON_VecPred256
- #define Q6S_VectorPred HEXAGON_VecPred128
- #define Q6S_Vector HEXAGON_Vect1024
- #define Q6S_VectorPair HEXAGON_Vect2048
- #define Q6S_UVector HEXAGON_UVect1024
- #define Q6S_UVectorPair HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
// V65 Vector types
#if __HVX_ARCH__ >= 65
#if defined __HVX__ && (__HVX_LENGTH__ == 128)
@@ -1256,7 +1225,6 @@ private:
#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
#endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
/* Predicates */
diff --git a/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h b/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h
index 41ce7a6b93e9..7e3679a38b2c 100644
--- a/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h
+++ b/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h
@@ -9,7 +9,6 @@
//===----------------------------------------------------------------------===//
-
#ifndef _HVX_HEXAGON_PROTOS_H_
#define _HVX_HEXAGON_PROTOS_H_ 1
@@ -28,7 +27,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -39,7 +38,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -50,7 +49,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -61,7 +60,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -72,7 +71,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -83,7 +82,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -94,7 +93,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -105,7 +104,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -116,7 +115,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -127,7 +126,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -138,7 +137,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -149,7 +148,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
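
Predicate-conditional stores need only the inbound conversion, since they return nothing: the Q operand is run through V6_vandvrt while the address and data pass straight to the builtin. A hedged sketch; the pointer type follows the C intrinsic prototypes used elsewhere in this header and the names are illustrative:

/* Store Vs at *dst in the lanes where Qv is false. */
static void store_where_clear(HVX_VectorPred Qv, HVX_Vector *dst, HVX_Vector Vs) {
  Q6_vmem_QnRIV(Qv, dst, Vs);
}
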
#if __HVX_ARCH__ >= 60
@@ -160,7 +159,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -171,7 +170,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -182,7 +181,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -193,7 +192,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -204,7 +203,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -215,7 +214,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -226,7 +225,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -237,7 +236,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -248,7 +247,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -259,7 +258,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -270,7 +269,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -281,7 +280,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -292,7 +291,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -303,7 +302,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -314,7 +313,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -325,7 +324,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -336,7 +335,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -347,7 +346,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -358,7 +357,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -369,7 +368,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -380,7 +379,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -391,7 +390,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -402,7 +401,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -413,7 +412,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -424,7 +423,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -435,7 +434,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -446,7 +445,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -457,7 +456,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -468,7 +467,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -479,7 +478,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -490,7 +489,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -501,7 +500,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -512,7 +511,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -523,7 +522,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -534,7 +533,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -545,7 +544,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -556,7 +555,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -567,7 +566,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -578,7 +577,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -589,7 +588,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
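
Going the other way, a builtin that produces a machine predicate (V6_vandvrt here) has its result rewrapped with V6_vandqrt so the caller still receives the vector-backed HVX_VectorPred. An illustrative sketch:

/* Build a predicate from vector Vu ANDed with scalar Rt. */
static HVX_VectorPred lanes_from_mask(HVX_Vector Vu, Word32 Rt) {
  return Q6_Q_vand_VR(Vu, Rt);
}
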
#if __HVX_ARCH__ >= 60
@@ -600,7 +599,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -611,7 +610,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -622,7 +621,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -633,7 +632,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -644,7 +643,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -655,7 +654,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -666,7 +665,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -677,7 +676,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -688,7 +687,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -699,7 +698,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -710,7 +709,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -721,7 +720,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -732,7 +731,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -743,7 +742,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -754,7 +753,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -765,7 +764,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -776,7 +775,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -787,7 +786,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -798,7 +797,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -809,7 +808,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -820,7 +819,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -831,7 +830,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -842,7 +841,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -853,7 +852,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -864,7 +863,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -875,7 +874,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -886,7 +885,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -897,7 +896,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -908,7 +907,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -919,7 +918,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -930,7 +929,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -941,7 +940,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()
#endif /* __HEXAGON_ARCH___ >= 60 */
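
Even the nullary Q6_V_vzero becomes function-like, so callers must now supply an empty argument list. For example (illustrative):

static HVX_Vector zero_acc(void) {
  return Q6_V_vzero();  /* the empty argument list is now required */
}
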
#if __HVX_ARCH__ >= 60
@@ -952,7 +951,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -963,7 +962,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -974,7 +973,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -985,7 +984,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -996,7 +995,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1007,7 +1006,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1018,7 +1017,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1029,7 +1028,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1040,7 +1039,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1051,7 +1050,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1062,7 +1061,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1073,7 +1072,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1084,7 +1083,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1095,7 +1094,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1106,29 +1105,29 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1139,7 +1138,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1150,40 +1149,40 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1194,7 +1193,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1205,7 +1204,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1216,7 +1215,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1227,7 +1226,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
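
The compare family follows the same result-wrapping rule: the raw veq*/vgt* builtins yield a machine predicate, which V6_vandqrt turns back into the vector-backed HVX_VectorPred. A short sketch with illustrative names:

/* Byte-wise equality; each matching lane sets the predicate. */
static HVX_VectorPred bytes_equal(HVX_Vector Vu, HVX_Vector Vv) {
  return Q6_Q_vcmp_eq_VbVb(Vu, Vv);
}
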
#if __HVX_ARCH__ >= 60
@@ -1238,7 +1237,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1249,7 +1248,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1260,7 +1259,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1271,7 +1270,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1282,7 +1281,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1293,7 +1292,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1304,7 +1303,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1315,7 +1314,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1326,7 +1325,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1337,7 +1336,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1348,7 +1347,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1359,7 +1358,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1370,7 +1369,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1381,7 +1380,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1392,7 +1391,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1403,7 +1402,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1414,7 +1413,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1425,7 +1424,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1436,7 +1435,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1447,7 +1446,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1458,7 +1457,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1469,7 +1468,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1480,7 +1479,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1491,7 +1490,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1502,7 +1501,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1513,7 +1512,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1524,7 +1523,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1535,7 +1534,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1546,7 +1545,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1557,7 +1556,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1568,7 +1567,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1579,7 +1578,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1590,7 +1589,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1601,7 +1600,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1612,7 +1611,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
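The accumulating compare forms (gtand/gtor/gtxacc) additionally convert the incoming predicate with __builtin_HEXAGON_V6_vandvrt((Qx),-1) before the compare builtin sees it, so predicates now cross the macro boundary in both directions. A sketch under the same assumptions as above:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* XOR-accumulate a signed-word greater-than mask into q, per lane. */
static HVX_VectorPred gt_xacc_w(HVX_VectorPred q, HVX_Vector a, HVX_Vector b) {
  return Q6_Q_vcmp_gtxacc_QVwVw(q, a, b);
}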
#if __HVX_ARCH__ >= 60
@@ -1623,7 +1622,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1634,7 +1633,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1645,7 +1644,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1656,7 +1655,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1667,7 +1666,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1678,7 +1677,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1689,7 +1688,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1700,7 +1699,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1711,7 +1710,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1722,7 +1721,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1733,7 +1732,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1744,7 +1743,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1755,7 +1754,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1766,7 +1765,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1777,7 +1776,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
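From here on most hunks are mechanical: an object-like alias becomes a function-like macro that forwards its arguments to the wrapped builtin unchanged, so existing call sites keep compiling as before. One such call, sketched under the same assumptions:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Per-lane signed-word max; expands to
   __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(x, y). */
static HVX_Vector max_w(HVX_Vector x, HVX_Vector y) {
  return Q6_Vw_vmax_VwVw(x, y);
}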
#if __HVX_ARCH__ >= 60
@@ -1788,7 +1787,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1799,7 +1798,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1810,7 +1809,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1821,7 +1820,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1832,7 +1831,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1843,7 +1842,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1854,7 +1853,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1865,7 +1864,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1876,7 +1875,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1887,7 +1886,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1898,7 +1897,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1909,7 +1908,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1920,7 +1919,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1931,7 +1930,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1942,7 +1941,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1953,7 +1952,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1964,7 +1963,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1975,7 +1974,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1986,29 +1985,29 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2019,7 +2018,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2030,7 +2029,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2041,7 +2040,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2052,18 +2051,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2074,7 +2073,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2085,7 +2084,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2096,7 +2095,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2107,7 +2106,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2118,7 +2117,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2129,7 +2128,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2140,7 +2139,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2151,7 +2150,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2162,7 +2161,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2173,7 +2172,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2184,7 +2183,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2195,7 +2194,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2206,7 +2205,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2217,7 +2216,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2228,7 +2227,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2239,7 +2238,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2250,7 +2249,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2261,7 +2260,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2272,7 +2271,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2283,7 +2282,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2294,7 +2293,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2305,7 +2304,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2316,7 +2315,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2327,7 +2326,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2338,7 +2337,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2349,7 +2348,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
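Q6_V_vmux_QVV is another predicate consumer: Qt is passed through vandvrt((Qt),-1) before the mux builtin. Combined with one of the compare macros above, this gives the usual select idiom (a sketch, same assumptions):

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Per-lane unsigned-word max via compare + mux: take a where a > b, else b. */
static HVX_Vector umax_w(HVX_Vector a, HVX_Vector b) {
  HVX_VectorPred q = Q6_Q_vcmp_gt_VuwVuw(a, b);
  return Q6_V_vmux_QVV(q, a, b);
}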
#if __HVX_ARCH__ >= 60
@@ -2360,7 +2359,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2371,7 +2370,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2382,7 +2381,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2393,7 +2392,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2404,7 +2403,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2415,7 +2414,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2426,7 +2425,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2437,7 +2436,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2448,7 +2447,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2459,7 +2458,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2470,7 +2469,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2481,7 +2480,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2492,7 +2491,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2503,7 +2502,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2514,7 +2513,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2525,7 +2524,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2536,7 +2535,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2547,7 +2546,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2558,7 +2557,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2569,7 +2568,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
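The I-suffixed forms (Iu1 here, Iu3 for vlalignbi earlier) expose an immediate operand; the underlying builtins are assumed to require a compile-time constant there, so the new macro parameter should be a literal or constant expression. Sketch:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Reducing multiply over a vector pair; the trailing #u1 immediate must be
   a constant (0 or 1 selecting the byte alignment -- an assumption here). */
static HVX_VectorPair rmpy0(HVX_VectorPair uu, int rt) {
  return Q6_Ww_vrmpy_WubRbI(uu, rt, 0);
}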
#if __HVX_ARCH__ >= 60
@@ -2580,7 +2579,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2591,18 +2590,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2613,18 +2612,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b)
C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2635,7 +2634,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2646,7 +2645,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2657,7 +2656,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2668,7 +2667,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2679,18 +2678,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2701,7 +2700,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2712,7 +2711,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2723,7 +2722,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2734,7 +2733,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2745,7 +2744,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2756,7 +2755,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2767,7 +2766,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2778,7 +2777,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2789,7 +2788,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2800,7 +2799,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
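The W-prefixed widening macros return a vector pair from a single input vector and forward their argument like the rest. Sketch, same assumptions:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Sign-extend byte lanes to halfwords; the pair carries both result halves. */
static HVX_VectorPair widen_b(HVX_Vector v) {
  return Q6_Wh_vsxt_Vb(v);
}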
#if __HVX_ARCH__ >= 60
@@ -2811,7 +2810,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2822,7 +2821,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2833,7 +2832,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2844,7 +2843,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2855,7 +2854,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2866,7 +2865,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2877,7 +2876,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2888,7 +2887,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2899,7 +2898,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2910,7 +2909,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2921,7 +2920,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2932,7 +2931,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2943,7 +2942,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2954,7 +2953,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2965,7 +2964,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2976,7 +2975,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2987,7 +2986,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2998,7 +2997,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
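The condnac macros take a predicate plus the read-modify operand Vx; judging by the builtin names, the Q form applies the subtraction in lanes where the predicate is set and the Qn form where it is clear (an assumption, since the descriptive comments are not visible in these hunks). Sketch:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Halfword lanes of x selected by q become x - u; others pass through. */
static HVX_Vector cond_sub_h(HVX_VectorPred q, HVX_Vector x, HVX_Vector u) {
  return Q6_Vh_condnac_QVhVh(q, x, u);
}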
#if __HVX_ARCH__ >= 60
@@ -3009,7 +3008,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3020,7 +3019,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3031,7 +3030,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3042,7 +3041,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3053,7 +3052,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3064,7 +3063,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3075,7 +3074,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3086,7 +3085,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3097,7 +3096,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3108,7 +3107,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3119,7 +3118,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3130,7 +3129,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3141,7 +3140,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3152,7 +3151,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3163,7 +3162,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3174,7 +3173,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
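Q6_W_vswap_QVV converts Qt the same way and yields a vector pair holding both selections. Sketch, same assumptions:

#include <hexagon_types.h>
#include <hvx_hexagon_protos.h>

/* Exchange lanes between a and b under q; the result pair holds both mixes. */
static HVX_VectorPair swap_under(HVX_VectorPred q, HVX_Vector a, HVX_Vector b) {
  return Q6_W_vswap_QVV(q, a, b);
}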
#if __HVX_ARCH__ >= 60
@@ -3185,7 +3184,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3196,7 +3195,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3207,7 +3206,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3218,7 +3217,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3229,7 +3228,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3240,7 +3239,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3251,7 +3250,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3262,7 +3261,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3273,7 +3272,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3284,7 +3283,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3295,7 +3294,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3306,7 +3305,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3317,7 +3316,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3328,7 +3327,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3339,7 +3338,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 62
@@ -3350,7 +3349,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3361,7 +3360,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3372,7 +3371,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 62 */
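
Predicate results go the other way: builtins that produce a true predicate
are wrapped in __builtin_HEXAGON_V6_vandqrt((...),-1), so the macro still
returns the vector-typed HVX_VectorPred. A sketch, same includes as the
earlier one:

    HVX_VectorPred make_pred(int n) {
        /* pred_scalar2v2 yields a real predicate; vandqrt((..),-1) turns
           it back into the vector-typed value this header traffics in */
        return Q6_Q_vsetq2_R(n);
    }
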
#if __HVX_ARCH__ >= 62
@@ -3383,7 +3382,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3394,7 +3393,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3405,7 +3404,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3416,7 +3415,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3427,7 +3426,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3438,7 +3437,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3449,7 +3448,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3460,7 +3459,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3471,7 +3470,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3482,7 +3481,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3493,7 +3492,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3504,7 +3503,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3515,7 +3514,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3526,7 +3525,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3537,7 +3536,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3548,7 +3547,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3559,7 +3558,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3570,7 +3569,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3581,7 +3580,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3592,7 +3591,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3603,7 +3602,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3614,7 +3613,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3625,7 +3624,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3636,7 +3635,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3647,7 +3646,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3658,7 +3657,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3669,7 +3668,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3680,7 +3679,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3691,7 +3690,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3702,7 +3701,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3713,7 +3712,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3724,7 +3723,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3735,7 +3734,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3746,7 +3745,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3757,7 +3756,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3768,7 +3767,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3779,7 +3778,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3790,7 +3789,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3801,7 +3800,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3812,7 +3811,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3823,7 +3822,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3834,7 +3833,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3845,7 +3844,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3856,7 +3855,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 65
@@ -3867,7 +3866,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3878,7 +3877,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3889,7 +3888,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3900,7 +3899,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3911,7 +3910,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3922,7 +3921,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3933,7 +3932,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3944,7 +3943,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3955,7 +3954,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3966,7 +3965,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3977,7 +3976,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3988,7 +3987,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()
#endif /* __HEXAGON_ARCH___ >= 65 */
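
Q6_W_vzero is now a function-like macro with an empty parameter list, so
call sites must spell the parentheses:

    HVX_VectorPair acc = Q6_W_vzero();   /* an all-zero vector pair */
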
#if __HVX_ARCH__ >= 65
@@ -3999,7 +3998,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4010,7 +4009,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
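
For the gathers only the predicate operand needs wrapping; the scalar
operands pass straight through to the builtin. A loose sketch under the
usual reading of the operands (Rs = destination address, Rt/Mu = VTCM
table base and region size, Vv = element offsets; that reading and the
names below come from the HVX documentation, not from this patch):

    void gather_h(HVX_Vector *dst, HVX_VectorPred qs,
                  unsigned table, unsigned region, HVX_Vector offs) {
        /* masked halfword gather; qs is rebuilt via vandvrt((qs),-1) */
        Q6_vgather_AQRMVh(dst, qs, table, region, offs);
    }
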
#if __HVX_ARCH__ >= 65
@@ -4021,7 +4020,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4032,7 +4031,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4043,7 +4042,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4054,7 +4053,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4065,7 +4064,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4076,7 +4075,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4087,7 +4086,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4098,7 +4097,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4109,7 +4108,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4120,7 +4119,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4131,7 +4130,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4142,7 +4141,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4153,7 +4152,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4164,7 +4163,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4175,7 +4174,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4186,7 +4185,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4197,7 +4196,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4208,7 +4207,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4219,7 +4218,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4230,7 +4229,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4241,7 +4240,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4252,7 +4251,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4263,7 +4262,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4274,7 +4273,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4285,7 +4284,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4296,7 +4295,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
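
The masked scatters mirror the gathers, with the predicate again the only
wrapped operand. A similarly hedged sketch:

    void scatter_w(HVX_VectorPred qs, unsigned base, unsigned region,
                   HVX_Vector offs, HVX_Vector vals) {
        /* masked word scatter: active lanes of vals store to VTCM */
        Q6_vscatter_QRMVwV(qs, base, region, offs, vals);
    }
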
#if __HVX_ARCH__ >= 66
@@ -4307,7 +4306,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 66
@@ -4318,7 +4317,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 66
@@ -4329,7 +4328,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 66
@@ -4340,7 +4339,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 68
@@ -4351,7 +4350,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)
#endif /* __HEXAGON_ARCH___ >= 68 */
#if __HVX_ARCH__ >= 68
@@ -4362,7 +4361,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
#endif /* __HEXAGON_ARCH___ >= 68 */
#if __HVX_ARCH__ >= 68
@@ -4373,7 +4372,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)
#endif /* __HEXAGON_ARCH___ >= 68 */
#if __HVX_ARCH__ >= 68
@@ -4384,9 +4383,801 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vabs(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vabs(Vu32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vadd(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vadd(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
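
Note the two add flavors for half floats: Q6_Vqf16_vadd_VhfVhf returns the
qf16 format on the shift resource (CVI_VS, SLOT0123), while
Q6_Vhf_vadd_VhfVhf returns IEEE hf on the multiply resource (CVI_VX,
SLOT23). A sketch; qf16 presumably defers rounding between operations and
must come back through Q6_Vhf_equals_Vqf16 (defined below) before use as
IEEE values:

    HVX_Vector add_both(HVX_Vector a, HVX_Vector b, HVX_Vector *sum_hf) {
        *sum_hf = Q6_Vhf_vadd_VhfVhf(a, b);   /* IEEE hf result, SLOT23  */
        return Q6_Vqf16_vadd_VhfVhf(a, b);    /* qf16 result,  SLOT0123  */
    }
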
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vadd(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vadd(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vadd(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vfmv(Vu32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=Vu32.qf16
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=Vuu32.qf32
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=Vu32.qf32
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
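
With the conversion macros in place, the intended qf32 pattern appears to
be: accumulate in qf32, normalize once at the end. A sketch using only
macros defined in this header:

    HVX_Vector sum3_sf(HVX_Vector x, HVX_Vector y, HVX_Vector z) {
        HVX_Vector acc = Q6_Vqf32_vadd_VsfVsf(x, y);   /* sf+sf   -> qf32 */
        acc = Q6_Vqf32_vadd_Vqf32Vsf(acc, z);          /* qf32+sf -> qf32 */
        return Q6_Vsf_equals_Vqf32(acc);               /* back to IEEE sf */
    }
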
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vcvt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vcvt(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.hf=vcvt(Vu32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vcvt(Vu32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vcvt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.hf=vcvt(Vu32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vcvt(Vu32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vcvt(Vu32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vcvt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vcvt(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
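
The vcvt family is shaped by element width: widening forms return a vector
pair, narrowing forms take two source vectors. A sketch assuming the
Q6_V_lo_W/Q6_V_hi_W pair accessors from earlier in this header; the hi/lo
operand order on the narrowing call is a guess, check the PRM:

    HVX_Vector roundtrip_b(HVX_Vector vb_in) {
        HVX_VectorPair hf = Q6_Whf_vcvt_Vb(vb_in);     /* b -> hf, pair out */
        return Q6_Vb_vcvt_VhfVhf(Q6_V_hi_W(hf),        /* two hf in -> b   */
                                 Q6_V_lo_W(hf));
    }
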
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
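
vdmpy here is the floating-point pairwise multiply, and the _acc form adds
into Vx. A sketch assuming Q6_W_vzero's single-vector sibling Q6_V_vzero()
(defined earlier in this header) gives all-zero sf lanes:

    HVX_Vector dot_hf(HVX_Vector u, HVX_Vector v) {
        HVX_Vector acc = Q6_V_vzero();                /* zeroed accumulator */
        return Q6_Vsf_vdmpyacc_VsfVhfVhf(acc, u, v);  /* acc += vdmpy(u,v)  */
    }
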
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vfmax(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vfmax(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vfmin(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vfmin(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vfneg(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vfneg(Vu32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 68 */
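
The float compares compose with the existing predicate consumers, since
both sides use the vector-typed HVX_VectorPred. Compare-plus-mux gives an
elementwise max, for instance, though Q6_Vhf_vfmax_VhfVhf above is the
direct route (Q6_V_vmux_QVV is defined earlier in this header):

    HVX_Vector max_hf(HVX_Vector a, HVX_Vector b) {
        HVX_VectorPred gt = Q6_Q_vcmp_gt_VhfVhf(a, b); /* lanes where a>b */
        return Q6_V_vmux_QVV(gt, a, b);                /* pick a, else b  */
    }
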
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vmax(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vmax(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vmin(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vmin(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vmpy(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vsub(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vsub(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vsub(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)
+#endif /* __HVX_ARCH__ >= 69 */
+
#endif /* __HVX__ */
#endif
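As a usage illustration (not part of the patch itself): the V68 blocks above expose native single- and half-precision HVX float arithmetic, with each Q6_* macro wrapping the matching __builtin_HEXAGON_V6_* builtin. A minimal sketch, assuming a Hexagon toolchain built with -mhvx -mv68 and the 128-byte vector length; the include spelling, function name, and pointer casts are invented for the example:

  #include <hexagon_types.h>       /* HVX_Vector */
  #include <hvx_hexagon_protos.h>  /* the Q6_* macros added above */

  /* Elementwise a[i] *= b[i] for n floats; n is assumed to be a multiple
     of 32, since one 128-byte HVX register holds 32 floats. */
  static void vmul_f32(float *a, const float *b, unsigned n) {
    HVX_Vector *va = (HVX_Vector *)a;
    const HVX_Vector *vb = (const HVX_Vector *)b;
    for (unsigned i = 0; i < n / 32; ++i)
      /* Vd32.sf=vmpy(Vu32.sf,Vv32.sf): one of the V68 intrinsics above. */
      va[i] = Q6_Vsf_vmpy_VsfVsf(va[i], vb[i]);
  }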
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c.h b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
index 32af848a94c4..77a7a8b9bb3a 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
@@ -11190,305 +11190,305 @@ half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
* 64-bit aligned if gentype is long, ulong, double.
*/
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
+char2 __ovld __purefn vload2(size_t offset, const __constant char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __constant uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __constant short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __constant ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __constant int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __constant uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __constant long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __constant ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __constant float *p);
+char3 __ovld __purefn vload3(size_t offset, const __constant char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __constant uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __constant short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __constant ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __constant int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __constant uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __constant long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __constant ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __constant float *p);
+char4 __ovld __purefn vload4(size_t offset, const __constant char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __constant uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __constant short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __constant ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __constant int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __constant uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __constant long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __constant ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __constant float *p);
+char8 __ovld __purefn vload8(size_t offset, const __constant char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __constant uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __constant short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __constant ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __constant int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __constant uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __constant long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __constant ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __constant float *p);
+char16 __ovld __purefn vload16(size_t offset, const __constant char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __constant uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __constant short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __constant ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __constant int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __constant uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __constant long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __constant ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __constant float *p);
#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
+double2 __ovld __purefn vload2(size_t offset, const __constant double *p);
+double3 __ovld __purefn vload3(size_t offset, const __constant double *p);
+double4 __ovld __purefn vload4(size_t offset, const __constant double *p);
+double8 __ovld __purefn vload8(size_t offset, const __constant double *p);
+double16 __ovld __purefn vload16(size_t offset, const __constant double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
+half __ovld __purefn vload(size_t offset, const __constant half *p);
+half2 __ovld __purefn vload2(size_t offset, const __constant half *p);
+half3 __ovld __purefn vload3(size_t offset, const __constant half *p);
+half4 __ovld __purefn vload4(size_t offset, const __constant half *p);
+half8 __ovld __purefn vload8(size_t offset, const __constant half *p);
+half16 __ovld __purefn vload16(size_t offset, const __constant half *p);
#endif //cl_khr_fp16
#if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
+char2 __ovld __purefn vload2(size_t offset, const char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const int *p);
+uint2 __ovld __purefn vload2(size_t offset, const uint *p);
+long2 __ovld __purefn vload2(size_t offset, const long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const float *p);
+char3 __ovld __purefn vload3(size_t offset, const char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const int *p);
+uint3 __ovld __purefn vload3(size_t offset, const uint *p);
+long3 __ovld __purefn vload3(size_t offset, const long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const float *p);
+char4 __ovld __purefn vload4(size_t offset, const char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const int *p);
+uint4 __ovld __purefn vload4(size_t offset, const uint *p);
+long4 __ovld __purefn vload4(size_t offset, const long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const float *p);
+char8 __ovld __purefn vload8(size_t offset, const char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const int *p);
+uint8 __ovld __purefn vload8(size_t offset, const uint *p);
+long8 __ovld __purefn vload8(size_t offset, const long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const float *p);
+char16 __ovld __purefn vload16(size_t offset, const char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const int *p);
+uint16 __ovld __purefn vload16(size_t offset, const uint *p);
+long16 __ovld __purefn vload16(size_t offset, const long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const float *p);
#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
+double2 __ovld __purefn vload2(size_t offset, const double *p);
+double3 __ovld __purefn vload3(size_t offset, const double *p);
+double4 __ovld __purefn vload4(size_t offset, const double *p);
+double8 __ovld __purefn vload8(size_t offset, const double *p);
+double16 __ovld __purefn vload16(size_t offset, const double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
+half __ovld __purefn vload(size_t offset, const half *p);
+half2 __ovld __purefn vload2(size_t offset, const half *p);
+half3 __ovld __purefn vload3(size_t offset, const half *p);
+half4 __ovld __purefn vload4(size_t offset, const half *p);
+half8 __ovld __purefn vload8(size_t offset, const half *p);
+half16 __ovld __purefn vload16(size_t offset, const half *p);
#endif //cl_khr_fp16
#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
+char2 __ovld __purefn vload2(size_t offset, const __global char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __global uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __global short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __global ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __global int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __global uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __global long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __global ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __global float *p);
+char3 __ovld __purefn vload3(size_t offset, const __global char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __global uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __global short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __global ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __global int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __global uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __global long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __global ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __global float *p);
+char4 __ovld __purefn vload4(size_t offset, const __global char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __global uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __global short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __global ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __global int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __global uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __global long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __global ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __global float *p);
+char8 __ovld __purefn vload8(size_t offset, const __global char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __global uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __global short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __global ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __global int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __global uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __global long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __global ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __global float *p);
+char16 __ovld __purefn vload16(size_t offset, const __global char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __global uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __global short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __global ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __global int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __global uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __global long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __global ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __global float *p);
+char2 __ovld __purefn vload2(size_t offset, const __local char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __local uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __local short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __local ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __local int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __local uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __local long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __local ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __local float *p);
+char3 __ovld __purefn vload3(size_t offset, const __local char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __local uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __local short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __local ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __local int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __local uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __local long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __local ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __local float *p);
+char4 __ovld __purefn vload4(size_t offset, const __local char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __local uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __local short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __local ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __local int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __local uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __local long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __local ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __local float *p);
+char8 __ovld __purefn vload8(size_t offset, const __local char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __local uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __local short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __local ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __local int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __local uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __local long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __local ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __local float *p);
+char16 __ovld __purefn vload16(size_t offset, const __local char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __local uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __local short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __local ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __local int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __local uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __local long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __local ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __local float *p);
+char2 __ovld __purefn vload2(size_t offset, const __private char *p);
+uchar2 __ovld __purefn vload2(size_t offset, const __private uchar *p);
+short2 __ovld __purefn vload2(size_t offset, const __private short *p);
+ushort2 __ovld __purefn vload2(size_t offset, const __private ushort *p);
+int2 __ovld __purefn vload2(size_t offset, const __private int *p);
+uint2 __ovld __purefn vload2(size_t offset, const __private uint *p);
+long2 __ovld __purefn vload2(size_t offset, const __private long *p);
+ulong2 __ovld __purefn vload2(size_t offset, const __private ulong *p);
+float2 __ovld __purefn vload2(size_t offset, const __private float *p);
+char3 __ovld __purefn vload3(size_t offset, const __private char *p);
+uchar3 __ovld __purefn vload3(size_t offset, const __private uchar *p);
+short3 __ovld __purefn vload3(size_t offset, const __private short *p);
+ushort3 __ovld __purefn vload3(size_t offset, const __private ushort *p);
+int3 __ovld __purefn vload3(size_t offset, const __private int *p);
+uint3 __ovld __purefn vload3(size_t offset, const __private uint *p);
+long3 __ovld __purefn vload3(size_t offset, const __private long *p);
+ulong3 __ovld __purefn vload3(size_t offset, const __private ulong *p);
+float3 __ovld __purefn vload3(size_t offset, const __private float *p);
+char4 __ovld __purefn vload4(size_t offset, const __private char *p);
+uchar4 __ovld __purefn vload4(size_t offset, const __private uchar *p);
+short4 __ovld __purefn vload4(size_t offset, const __private short *p);
+ushort4 __ovld __purefn vload4(size_t offset, const __private ushort *p);
+int4 __ovld __purefn vload4(size_t offset, const __private int *p);
+uint4 __ovld __purefn vload4(size_t offset, const __private uint *p);
+long4 __ovld __purefn vload4(size_t offset, const __private long *p);
+ulong4 __ovld __purefn vload4(size_t offset, const __private ulong *p);
+float4 __ovld __purefn vload4(size_t offset, const __private float *p);
+char8 __ovld __purefn vload8(size_t offset, const __private char *p);
+uchar8 __ovld __purefn vload8(size_t offset, const __private uchar *p);
+short8 __ovld __purefn vload8(size_t offset, const __private short *p);
+ushort8 __ovld __purefn vload8(size_t offset, const __private ushort *p);
+int8 __ovld __purefn vload8(size_t offset, const __private int *p);
+uint8 __ovld __purefn vload8(size_t offset, const __private uint *p);
+long8 __ovld __purefn vload8(size_t offset, const __private long *p);
+ulong8 __ovld __purefn vload8(size_t offset, const __private ulong *p);
+float8 __ovld __purefn vload8(size_t offset, const __private float *p);
+char16 __ovld __purefn vload16(size_t offset, const __private char *p);
+uchar16 __ovld __purefn vload16(size_t offset, const __private uchar *p);
+short16 __ovld __purefn vload16(size_t offset, const __private short *p);
+ushort16 __ovld __purefn vload16(size_t offset, const __private ushort *p);
+int16 __ovld __purefn vload16(size_t offset, const __private int *p);
+uint16 __ovld __purefn vload16(size_t offset, const __private uint *p);
+long16 __ovld __purefn vload16(size_t offset, const __private long *p);
+ulong16 __ovld __purefn vload16(size_t offset, const __private ulong *p);
+float16 __ovld __purefn vload16(size_t offset, const __private float *p);
#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
+double2 __ovld __purefn vload2(size_t offset, const __global double *p);
+double3 __ovld __purefn vload3(size_t offset, const __global double *p);
+double4 __ovld __purefn vload4(size_t offset, const __global double *p);
+double8 __ovld __purefn vload8(size_t offset, const __global double *p);
+double16 __ovld __purefn vload16(size_t offset, const __global double *p);
+double2 __ovld __purefn vload2(size_t offset, const __local double *p);
+double3 __ovld __purefn vload3(size_t offset, const __local double *p);
+double4 __ovld __purefn vload4(size_t offset, const __local double *p);
+double8 __ovld __purefn vload8(size_t offset, const __local double *p);
+double16 __ovld __purefn vload16(size_t offset, const __local double *p);
+double2 __ovld __purefn vload2(size_t offset, const __private double *p);
+double3 __ovld __purefn vload3(size_t offset, const __private double *p);
+double4 __ovld __purefn vload4(size_t offset, const __private double *p);
+double8 __ovld __purefn vload8(size_t offset, const __private double *p);
+double16 __ovld __purefn vload16(size_t offset, const __private double *p);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
+half __ovld __purefn vload(size_t offset, const __global half *p);
+half2 __ovld __purefn vload2(size_t offset, const __global half *p);
+half3 __ovld __purefn vload3(size_t offset, const __global half *p);
+half4 __ovld __purefn vload4(size_t offset, const __global half *p);
+half8 __ovld __purefn vload8(size_t offset, const __global half *p);
+half16 __ovld __purefn vload16(size_t offset, const __global half *p);
+half __ovld __purefn vload(size_t offset, const __local half *p);
+half2 __ovld __purefn vload2(size_t offset, const __local half *p);
+half3 __ovld __purefn vload3(size_t offset, const __local half *p);
+half4 __ovld __purefn vload4(size_t offset, const __local half *p);
+half8 __ovld __purefn vload8(size_t offset, const __local half *p);
+half16 __ovld __purefn vload16(size_t offset, const __local half *p);
+half __ovld __purefn vload(size_t offset, const __private half *p);
+half2 __ovld __purefn vload2(size_t offset, const __private half *p);
+half3 __ovld __purefn vload3(size_t offset, const __private half *p);
+half4 __ovld __purefn vload4(size_t offset, const __private half *p);
+half8 __ovld __purefn vload8(size_t offset, const __private half *p);
+half16 __ovld __purefn vload16(size_t offset, const __private half *p);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
@@ -11736,13 +11736,13 @@ void __ovld vstore16(half16 data, size_t offset, __private half *p);
* The read address computed as (p + offset)
* must be 16-bit aligned.
*/
-float __ovld vload_half(size_t offset, const __constant half *p);
+float __ovld __purefn vload_half(size_t offset, const __constant half *p);
#if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
+float __ovld __purefn vload_half(size_t offset, const half *p);
#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
+float __ovld __purefn vload_half(size_t offset, const __global half *p);
+float __ovld __purefn vload_half(size_t offset, const __local half *p);
+float __ovld __purefn vload_half(size_t offset, const __private half *p);
#endif //defined(__opencl_c_generic_address_space)
/**
@@ -11753,33 +11753,33 @@ float __ovld vload_half(size_t offset, const __private half *p);
* value is returned. The read address computed
* as (p + (offset * n)) must be 16-bit aligned.
*/
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __constant half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __constant half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __constant half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __constant half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __constant half *p);
#if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const half *p);
#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __global half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __global half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __global half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __global half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __global half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __local half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __local half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __local half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __local half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __local half *p);
+float2 __ovld __purefn vload_half2(size_t offset, const __private half *p);
+float3 __ovld __purefn vload_half3(size_t offset, const __private half *p);
+float4 __ovld __purefn vload_half4(size_t offset, const __private half *p);
+float8 __ovld __purefn vload_half8(size_t offset, const __private half *p);
+float16 __ovld __purefn vload_half16(size_t offset, const __private half *p);
#endif //defined(__opencl_c_generic_address_space)
/**
@@ -12073,33 +12073,33 @@ void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
* The address computed as (p + (offset * 4))
* must be aligned to sizeof (half) * 4 bytes.
*/
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __constant half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __constant half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __constant half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __constant half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __constant half *p);
#if defined(__opencl_c_generic_address_space)
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const half *p);
#else
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __global half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __global half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __global half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __global half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __global half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __local half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __local half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __local half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __local half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __local half *p);
+float2 __ovld __purefn vloada_half2(size_t offset, const __private half *p);
+float3 __ovld __purefn vloada_half3(size_t offset, const __private half *p);
+float4 __ovld __purefn vloada_half4(size_t offset, const __private half *p);
+float8 __ovld __purefn vloada_half8(size_t offset, const __private half *p);
+float16 __ovld __purefn vloada_half16(size_t offset, const __private half *p);
#endif //defined(__opencl_c_generic_address_space)
/**
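The `__purefn` marker added throughout the hunks above is defined by these headers as `__attribute__((pure))`, telling the optimizer that the vload family reads memory but has no side effects. A minimal sketch of the payoff, with hypothetical names, assuming that expansion:

    __attribute__((pure)) int load2(const int *p, unsigned off);

    int twice(const int *p) {
      // With 'pure', the two identical calls can be folded into one,
      // since no stores intervene between them.
      return load2(p, 0) + load2(p, 0);
    }
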
diff --git a/contrib/llvm-project/clang/lib/Headers/unwind.h b/contrib/llvm-project/clang/lib/Headers/unwind.h
index 029524b7bc84..6e069798f02d 100644
--- a/contrib/llvm-project/clang/lib/Headers/unwind.h
+++ b/contrib/llvm-project/clang/lib/Headers/unwind.h
@@ -172,7 +172,8 @@ typedef enum {
_UVRSC_CORE = 0, /* integer register */
_UVRSC_VFP = 1, /* vfp */
_UVRSC_WMMXD = 3, /* Intel WMMX data register */
- _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+ _UVRSC_WMMXC = 4, /* Intel WMMX control register */
+ _UVRSC_PSEUDO = 5 /* Special purpose pseudo register */
} _Unwind_VRS_RegClass;
typedef enum {
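The new `_UVRSC_PSEUDO` class extends the ARM EHABI virtual register set with special-purpose pseudo registers (used, for example, when unwinding frames that carry a return-address authentication code). A hypothetical probe through the VRS API declared in this header, assuming an ARM EHABI target and pseudo register number 0 (the numbering is implementation-defined):

    #include <stdint.h>
    #include <unwind.h>

    _Unwind_VRS_Result get_pseudo(_Unwind_Context *ctx, uint32_t *out) {
      return _Unwind_VRS_Get(ctx, _UVRSC_PSEUDO, 0, _UVRSD_UINT32, out);
    }
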
diff --git a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
index 9fa170410da3..0b136aeb580f 100644
--- a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
@@ -832,12 +832,16 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
return std::make_pair(Result, true);
}
-Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc) {
- PendingSubmodules.emplace_back(
- new Module("<global>", Loc, nullptr, /*IsFramework*/ false,
- /*IsExplicit*/ true, NumCreatedModules++));
- PendingSubmodules.back()->Kind = Module::GlobalModuleFragment;
- return PendingSubmodules.back().get();
+Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
+ Module *Parent) {
+ auto *Result = new Module("<global>", Loc, Parent, /*IsFramework*/ false,
+ /*IsExplicit*/ true, NumCreatedModules++);
+ Result->Kind = Module::GlobalModuleFragment;
+ // If the created module isn't owned by a parent, send it to PendingSubmodules
+ // to wait for its parent.
+ if (!Result->Parent)
+ PendingSubmodules.emplace_back(Result);
+ return Result;
}
Module *
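The new `Parent` parameter gives `createGlobalModuleFragmentForModuleUnit` two ownership paths; a sketch of the two call shapes (call sites hypothetical):

    // Detached: queued in PendingSubmodules until a parent adopts it.
    Module *Detached =
        MM.createGlobalModuleFragmentForModuleUnit(Loc, /*Parent=*/nullptr);
    // Owned: attached to ParentModule up front; never enters the pending list.
    Module *Owned =
        MM.createGlobalModuleFragmentForModuleUnit(Loc, ParentModule);
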
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
index 41e7f3f1dccb..e71a65f031e4 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
@@ -472,11 +472,9 @@ void TokenLexer::ExpandFunctionArguments() {
// If the '##' came from expanding an argument, turn it into 'unknown'
// to avoid pasting.
- for (Token &Tok : llvm::make_range(ResultToks.begin() + FirstResult,
- ResultToks.end())) {
+ for (Token &Tok : llvm::drop_begin(ResultToks, FirstResult))
if (Tok.is(tok::hashhash))
Tok.setKind(tok::unknown);
- }
if(ExpandLocStart.isValid()) {
updateLocForMacroArgTokens(CurTok.getLocation(),
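`llvm::drop_begin` is a direct replacement for the `make_range(begin + N, end)` idiom it displaces above; a self-contained sketch:

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    int sum_tail(const std::vector<int> &V) {
      int S = 0;
      for (int X : llvm::drop_begin(V, 2)) // == make_range(V.begin() + 2, V.end())
        S += X;
      return S; // for {1, 2, 3, 4}: visits 3 and 4, returns 7
    }
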
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 116724a0d50b..19cddc69ebfc 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -452,13 +452,14 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
CXXMethodDecl *Method;
if (FunctionTemplateDecl *FunTmpl
= dyn_cast<FunctionTemplateDecl>(LM.Method))
- Method = cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ Method = dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
else
- Method = cast<CXXMethodDecl>(LM.Method);
+ Method = dyn_cast<CXXMethodDecl>(LM.Method);
- Sema::CXXThisScopeRAII ThisScope(Actions, Method->getParent(),
- Method->getMethodQualifiers(),
- getLangOpts().CPlusPlus11);
+ Sema::CXXThisScopeRAII ThisScope(
+ Actions, Method ? Method->getParent() : nullptr,
+ Method ? Method->getMethodQualifiers() : Qualifiers{},
+ Method && getLangOpts().CPlusPlus11);
// Parse the exception-specification.
SourceRange SpecificationRange;
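The switch from `cast` to `dyn_cast` turns an assert-on-mismatch into a null result, which the rebuilt `CXXThisScopeRAII` call then tolerates; the guard pattern in isolation (surrounding declarations hypothetical):

    // cast<CXXMethodDecl>(D) would assert if D is not a method;
    // dyn_cast returns nullptr instead, so callers can degrade gracefully.
    if (auto *MD = dyn_cast<CXXMethodDecl>(D))
      useParent(MD->getParent());
    else
      useParent(nullptr); // no enclosing class: no 'this' context
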
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index 1bdeccc4cbf5..0c1f88bc51d1 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -2891,7 +2891,8 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
}
ExprResult Parser::ParseExtIntegerArgument() {
- assert(Tok.is(tok::kw__ExtInt) && "Not an extended int type");
+ assert(Tok.isOneOf(tok::kw__ExtInt, tok::kw__BitInt) &&
+ "Not an extended int type");
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3882,11 +3883,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
DiagID, Policy);
break;
- case tok::kw__ExtInt: {
+ case tok::kw__ExtInt:
+ case tok::kw__BitInt: {
+ DiagnoseBitIntUse(Tok);
ExprResult ER = ParseExtIntegerArgument();
if (ER.isInvalid())
continue;
- isInvalid = DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+ isInvalid = DS.SetBitIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
ConsumedEnd = PrevTokLocation;
break;
}
@@ -5015,6 +5018,7 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw_char32_t:
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw___bf16:
case tok::kw_half:
case tok::kw_float:
@@ -5097,6 +5101,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_char32_t:
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_half:
case tok::kw___bf16:
case tok::kw_float:
@@ -5268,6 +5273,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_half:
case tok::kw___bf16:
case tok::kw_float:
@@ -7476,3 +7482,24 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
}
return false;
}
+
+void Parser::DiagnoseBitIntUse(const Token &Tok) {
+ // If the token is for _ExtInt, diagnose it as being deprecated. Otherwise,
+ // the token is about _BitInt and gets (potentially) diagnosed as use of an
+ // extension.
+ assert(Tok.isOneOf(tok::kw__ExtInt, tok::kw__BitInt) &&
+ "expected either an _ExtInt or _BitInt token!");
+
+ SourceLocation Loc = Tok.getLocation();
+ if (Tok.is(tok::kw__ExtInt)) {
+ Diag(Loc, diag::warn_ext_int_deprecated)
+ << FixItHint::CreateReplacement(Loc, "_BitInt");
+ } else {
+ // In C2x mode, diagnose that the use is not compatible with pre-C2x modes.
+ // Otherwise, diagnose that the use is a Clang extension.
+ if (getLangOpts().C2x)
+ Diag(Loc, diag::warn_c17_compat_bit_int);
+ else
+ Diag(Loc, diag::ext_bit_int) << getLangOpts().CPlusPlus;
+ }
+}
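What `DiagnoseBitIntUse` means for user code, sketched (both spellings parse; only the diagnostics differ):

    _ExtInt(12) a; // warning: '_ExtInt' is deprecated; fix-it suggests '_BitInt'
    _BitInt(12) b; // C2x: pre-C2x compatibility warning; other modes: extension warning
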
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index 2c8b4f9f441f..09a3842f5809 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -1513,6 +1513,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___int64:
case tok::kw___int128:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_half:
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index 4e5c0ac6c1c1..76c510ddd36c 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -2191,12 +2191,14 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
return;
}
- case tok::kw__ExtInt: {
+ case tok::kw__ExtInt:
+ case tok::kw__BitInt: {
+ DiagnoseBitIntUse(Tok);
ExprResult ER = ParseExtIntegerArgument();
if (ER.isInvalid())
DS.SetTypeSpecError();
else
- DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+ DS.SetBitIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
// Do this here because we have already consumed the close paren.
DS.SetRangeEnd(PrevTokLocation);
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index 613ad742c93f..300b022d83b9 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -3192,6 +3192,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
index be3823ecda01..35c9036fb27e 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
@@ -1690,6 +1690,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw__Atomic:
return TPResult::True;
+ case tok::kw__BitInt:
case tok::kw__ExtInt: {
if (NextToken().isNot(tok::l_paren))
return TPResult::Error;
@@ -1741,6 +1742,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_short:
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
diff --git a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
index 371557a624c9..e9b678b69594 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -203,7 +203,7 @@ std::string html::EscapeText(StringRef s, bool EscapeSpaces, bool ReplaceTabs) {
}
}
- return os.str();
+ return Str;
}
static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
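This `return OS.str()` to `return Str` cleanup (repeated in CodeCompleteConsumer.cpp and SemaAttr.cpp below) relies on `raw_string_ostream` being write-through in this LLVM version, so the backing string is already up to date; a sketch of the pattern:

    #include "llvm/Support/raw_ostream.h"
    #include <string>

    std::string render() {
      std::string Str;
      llvm::raw_string_ostream OS(Str);
      OS << "<html/>";   // written straight into Str
      return Str;        // previously: return OS.str();
    }
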
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index 6c47cedfccf3..0a2ca54e244a 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -335,7 +335,7 @@ std::string CodeCompletionString::getAsString() const {
break;
}
}
- return OS.str();
+ return Result;
}
const char *CodeCompletionString::getTypedText() const {
@@ -640,7 +640,7 @@ static std::string getOverloadAsString(const CodeCompletionString &CCS) {
break;
}
}
- return OS.str();
+ return Result;
}
void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(
diff --git a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
index 4405f29f3d99..d4dc790c008a 100644
--- a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
@@ -365,7 +365,7 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_half:
case TST_int:
case TST_int128:
- case TST_extint:
+ case TST_bitint:
case TST_struct:
case TST_interface:
case TST_union:
@@ -551,7 +551,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_char32: return "char32_t";
case DeclSpec::TST_int: return "int";
case DeclSpec::TST_int128: return "__int128";
- case DeclSpec::TST_extint: return "_ExtInt";
+ case DeclSpec::TST_bitint: return "_BitInt";
case DeclSpec::TST_half: return "half";
case DeclSpec::TST_float: return "float";
case DeclSpec::TST_double: return "double";
@@ -932,7 +932,7 @@ bool DeclSpec::SetTypeSpecError() {
return false;
}
-bool DeclSpec::SetExtIntType(SourceLocation KWLoc, Expr *BitsExpr,
+bool DeclSpec::SetBitIntType(SourceLocation KWLoc, Expr *BitsExpr,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
assert(BitsExpr && "no expression provided!");
@@ -945,7 +945,7 @@ bool DeclSpec::SetExtIntType(SourceLocation KWLoc, Expr *BitsExpr,
return true;
}
- TypeSpecType = TST_extint;
+ TypeSpecType = TST_bitint;
ExprRep = BitsExpr;
TSTLoc = KWLoc;
TSTNameLoc = KWLoc;
@@ -1252,7 +1252,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar &&
- !IsFixedPointType && TypeSpecType != TST_extint) {
+ !IsFixedPointType && TypeSpecType != TST_bitint) {
S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
@@ -1302,7 +1302,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
} else if (TypeSpecType == TST_int || TypeSpecType == TST_char ||
- TypeSpecType == TST_extint) {
+ TypeSpecType == TST_bitint) {
// Note that this intentionally doesn't include _Complex _Bool.
if (!S.getLangOpts().CPlusPlus)
S.Diag(TSTLoc, diag::ext_integer_complex);
diff --git a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
index 8cf7ec58eff5..38debc5aa9fc 100644
--- a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
+++ b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
@@ -806,17 +806,17 @@ multiclass VloadVstore<list<AddressSpace> addrspaces, bit defStores> {
foreach AS = addrspaces in {
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload" # VSize] in {
- def : Builtin<name, [VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, AS>]>;
- def : Builtin<name, [VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, AS>]>;
- def : Builtin<name, [VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, AS>]>;
- def : Builtin<name, [VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, AS>]>;
- def : Builtin<name, [VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, AS>]>;
- def : Builtin<name, [VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, AS>]>;
- def : Builtin<name, [VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, AS>]>;
- def : Builtin<name, [VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, AS>]>;
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, AS>]>;
- def : Builtin<name, [VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, AS>]>;
- def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<name, [VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>], Attr.Pure>;
}
if defStores then {
foreach name = ["vstore" # VSize] in {
@@ -848,10 +848,10 @@ defm : VloadVstore<[ConstantAS], 0>;
multiclass VloadVstoreHalf<list<AddressSpace> addrspaces, bit defStores> {
foreach AS = addrspaces in {
- def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>], Attr.Pure>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize, "vloada_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>], Attr.Pure>;
}
}
if defStores then {
@@ -877,7 +877,7 @@ let MaxVersion = CL20 in {
let MinVersion = CL20 in {
defm : VloadVstoreHalf<[GenericAS], 1>;
}
-// vload with constant address space is available regardless of version.
+// vload_half and vloada_half with constant address space are available regardless of version.
defm : VloadVstoreHalf<[ConstantAS], 0>;
// OpenCL v3.0 s6.15.8 - Synchronization Functions.
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index a2b8f475aa8c..734ed0f62ec6 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -1881,8 +1881,8 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
if (Ty->isDependentType())
return;
- if (Ty->isExtIntType()) {
- if (!Context.getTargetInfo().hasExtIntType()) {
+ if (Ty->isBitIntType()) {
+ if (!Context.getTargetInfo().hasBitIntType()) {
PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
if (D)
PD << D;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index 100f8e36a9b8..b69492768848 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -792,7 +792,7 @@ attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
OS << (I.index() == Rules.size() - 1 ? ", and " : ", ");
OS << "'" << attr::getSubjectMatchRuleSpelling(I.value()) << "'";
}
- return OS.str();
+ return Result;
}
} // end anonymous namespace
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
index 840b3daae63c..59601c5ce79d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
@@ -886,7 +886,6 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
diag::warn_maybe_capture_bad_target_this_ptr, Callee,
*this);
}
- return;
}
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
index c4826b5a6e8f..8cecf6c6ab4f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -736,8 +736,15 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
QualType T =
Context.getTypeDeclType(cast<TypeDecl>(SD->getUnderlyingDecl()));
+
+ if (T->isEnumeralType())
+ Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+
TypeLocBuilder TLB;
- if (isa<InjectedClassNameType>(T)) {
+ if (const auto *USD = dyn_cast<UsingShadowDecl>(SD)) {
+ T = Context.getUsingType(USD, T);
+ TLB.pushTypeSpec(T).setNameLoc(IdInfo.IdentifierLoc);
+ } else if (isa<InjectedClassNameType>(T)) {
InjectedClassNameTypeLoc InjectedTL
= TLB.push<InjectedClassNameTypeLoc>(T);
InjectedTL.setNameLoc(IdInfo.IdentifierLoc);
@@ -770,9 +777,6 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
}
- if (T->isEnumeralType())
- Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
-
SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
IdInfo.CCLoc);
return false;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index 33e2b3b5027d..4e83fa1fffca 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -195,6 +195,29 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return false;
}
+/// Check that the argument to __builtin_function_start is a function.
+static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 1))
+ return true;
+
+ ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
+ if (Arg.isInvalid())
+ return true;
+
+ TheCall->setArg(0, Arg.get());
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
+ Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));
+
+ if (!FD) {
+ S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
+ << TheCall->getSourceRange();
+ return true;
+ }
+
+ return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ TheCall->getBeginLoc());
+}
+
/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
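User-level shape of the new check, sketched: the argument must be (or decay to) a function whose address can be taken.

    void f(int);

    void *ok() { return __builtin_function_start(f); } // accepted
    // __builtin_function_start(42) would be rejected with
    // err_function_start_invalid_type.
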
@@ -325,17 +348,17 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
}
}
- // Disallow signed ExtIntType args larger than 128 bits to mul function until
- // we improve backend support.
+ // Disallow signed bit-precise integer args larger than 128 bits to mul
+ // function until we improve backend support.
if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
for (unsigned I = 0; I < 3; ++I) {
const auto Arg = TheCall->getArg(I);
// Third argument will be a pointer.
auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
- if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
+ if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
S.getASTContext().getIntWidth(Ty) > 128)
return S.Diag(Arg->getBeginLoc(),
- diag::err_overflow_builtin_ext_int_max_size)
+ diag::err_overflow_builtin_bit_int_max_size)
<< 128;
}
}
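Sketch of the widened restriction: `__builtin_mul_overflow` still accepts signed bit-precise operands up to 128 bits and diagnoses wider ones.

    _BitInt(128) a, b, r;
    bool mul() { return __builtin_mul_overflow(a, b, &r); } // accepted
    // Signed _BitInt(130) operands would hit
    // err_overflow_builtin_bit_int_max_size instead.
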
@@ -446,14 +469,14 @@ public:
break;
}
- auto OptionalFW = FS.getFieldWidth();
- if (OptionalFW.getHowSpecified() !=
+ analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
+ if (FW.getHowSpecified() !=
analyze_format_string::OptionalAmount::HowSpecified::Constant)
return true;
- unsigned SourceSize = OptionalFW.getConstantAmount() + NulByte;
+ unsigned SourceSize = FW.getConstantAmount() + NulByte;
- auto DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
+ Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
if (!DestSizeAPS)
return true;
@@ -652,20 +675,53 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
isConstantEvaluated())
return;
- unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
+ bool UseDABAttr = false;
+ const FunctionDecl *UseDecl = FD;
+
+ const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
+ if (DABAttr) {
+ UseDecl = DABAttr->getFunction();
+ assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
+ UseDABAttr = true;
+ }
+
+ unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);
+
if (!BuiltinID)
return;
const TargetInfo &TI = getASTContext().getTargetInfo();
unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
+ auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
+ // If we refer to a diagnose_as_builtin attribute, we need to change the
+ // argument index to refer to the arguments of the called function. Unless
+ // the index is out of bounds, which presumably means it's a variadic
+ // function.
+ if (!UseDABAttr)
+ return Index;
+ unsigned DABIndices = DABAttr->argIndices_size();
+ unsigned NewIndex = Index < DABIndices
+ ? DABAttr->argIndices_begin()[Index]
+ : Index - DABIndices + FD->getNumParams();
+ if (NewIndex >= TheCall->getNumArgs())
+ return llvm::None;
+ return NewIndex;
+ };
+
auto ComputeExplicitObjectSizeArgument =
[&](unsigned Index) -> Optional<llvm::APSInt> {
+ Optional<unsigned> IndexOptional = TranslateIndex(Index);
+ if (!IndexOptional)
+ return llvm::None;
+ unsigned NewIndex = IndexOptional.getValue();
Expr::EvalResult Result;
- Expr *SizeArg = TheCall->getArg(Index);
+ Expr *SizeArg = TheCall->getArg(NewIndex);
if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
return llvm::None;
- return Result.Val.getInt();
+ llvm::APSInt Integer = Result.Val.getInt();
+ Integer.setIsUnsigned(true);
+ return Integer;
};
auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
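The `TranslateIndex` machinery above services the `diagnose_as_builtin` attribute; a hypothetical wrapper showing the kind of argument mapping it has to translate (an identity mapping here):

    // Fortify diagnostics for my_memcpy calls are computed as if the call
    // were __builtin_memcpy(dst, src, n).
    __attribute__((diagnose_as_builtin(__builtin_memcpy, 1, 2, 3)))
    void *my_memcpy(void *dst, const void *src, unsigned long n);
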
@@ -680,7 +736,12 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
BOSType = POS->getType();
}
- const Expr *ObjArg = TheCall->getArg(Index);
+ Optional<unsigned> IndexOptional = TranslateIndex(Index);
+ if (!IndexOptional)
+ return llvm::None;
+ unsigned NewIndex = IndexOptional.getValue();
+
+ const Expr *ObjArg = TheCall->getArg(NewIndex);
uint64_t Result;
if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
return llvm::None;
@@ -690,7 +751,12 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
};
auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
- Expr *ObjArg = TheCall->getArg(Index);
+ Optional<unsigned> IndexOptional = TranslateIndex(Index);
+ if (!IndexOptional)
+ return llvm::None;
+ unsigned NewIndex = IndexOptional.getValue();
+
+ const Expr *ObjArg = TheCall->getArg(NewIndex);
uint64_t Result;
if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
return llvm::None;
@@ -898,7 +964,8 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
}
if (!SourceSize || !DestinationSize ||
- SourceSize.getValue().ule(DestinationSize.getValue()))
+ llvm::APSInt::compareValues(SourceSize.getValue(),
+ DestinationSize.getValue()) <= 0)
return;
StringRef FunctionName = GetFunctionName();
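`APSInt::compareValues` normalizes width and signedness before comparing, which the previous `ule` call did not; a sketch of why that matters here:

    #include "llvm/ADT/APSInt.h"

    bool fits() {
      llvm::APSInt Src(llvm::APInt(32, 16), /*isUnsigned=*/true);
      llvm::APSInt Dst(llvm::APInt(64, 32), /*isUnsigned=*/false);
      // ule would assert on the width/signedness mismatch; compareValues
      // converts both sides first.
      return llvm::APSInt::compareValues(Src, Dst) <= 0; // true
    }
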
@@ -1874,6 +1941,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinAddressof(*this, TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_function_start:
+ if (SemaBuiltinFunctionStart(*this, TheCall))
+ return ExprError();
+ break;
case Builtin::BI__builtin_is_aligned:
case Builtin::BI__builtin_align_up:
case Builtin::BI__builtin_align_down:
@@ -2098,20 +2169,85 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
- case Builtin::BI__builtin_elementwise_abs:
- if (SemaBuiltinElementwiseMathOneArg(TheCall))
+ // __builtin_elementwise_abs restricts the element type to signed integers or
+ // floating point types only.
+ case Builtin::BI__builtin_elementwise_abs: {
+ if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return ExprError();
+
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ QualType EltTy = ArgTy;
+
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+ if (EltTy->isUnsignedIntegerType()) {
+ Diag(TheCall->getArg(0)->getBeginLoc(),
+ diag::err_builtin_invalid_arg_type)
+ << 1 << /* signed integer or float ty*/ 3 << ArgTy;
+ return ExprError();
+ }
+ break;
+ }
+
+ // __builtin_elementwise_ceil restricts the element type to floating point
+ // types only.
+ case Builtin::BI__builtin_elementwise_ceil: {
+ if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return ExprError();
+
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ QualType EltTy = ArgTy;
+
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+ if (!EltTy->isFloatingType()) {
+ Diag(TheCall->getArg(0)->getBeginLoc(),
+ diag::err_builtin_invalid_arg_type)
+ << 1 << /* float ty*/ 5 << ArgTy;
+
return ExprError();
+ }
break;
+ }
+
case Builtin::BI__builtin_elementwise_min:
case Builtin::BI__builtin_elementwise_max:
if (SemaBuiltinElementwiseMath(TheCall))
return ExprError();
break;
case Builtin::BI__builtin_reduce_max:
- case Builtin::BI__builtin_reduce_min:
- if (SemaBuiltinReduceMath(TheCall))
+ case Builtin::BI__builtin_reduce_min: {
+ if (PrepareBuiltinReduceMathOneArgCall(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ const auto *TyA = Arg->getType()->getAs<VectorType>();
+ if (!TyA) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* vector ty*/ 4 << Arg->getType();
+ return ExprError();
+ }
+
+ TheCall->setType(TyA->getElementType());
+ break;
+ }
+
+ // __builtin_reduce_xor supports vector of integers only.
+ case Builtin::BI__builtin_reduce_xor: {
+ if (PrepareBuiltinReduceMathOneArgCall(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ const auto *TyA = Arg->getType()->getAs<VectorType>();
+ if (!TyA || !TyA->getElementType()->isIntegerType()) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* vector of integers */ 6 << Arg->getType();
return ExprError();
+ }
+ TheCall->setType(TyA->getElementType());
break;
+ }
+
case Builtin::BI__builtin_matrix_transpose:
return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
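User-level effect of the per-builtin element-type checks above, sketched with illustrative vector typedefs:

    typedef float float4 __attribute__((ext_vector_type(4)));
    typedef unsigned uint4 __attribute__((ext_vector_type(4)));
    typedef int int4 __attribute__((ext_vector_type(4)));

    float4 fabs4(float4 F)  { return __builtin_elementwise_abs(F); }  // OK
    float4 fceil4(float4 F) { return __builtin_elementwise_ceil(F); } // OK: float only
    int reduce(int4 I)      { return __builtin_reduce_xor(I); }       // OK: int vector -> scalar
    // __builtin_elementwise_abs on uint4 and __builtin_reduce_xor on float4
    // are rejected with err_builtin_invalid_arg_type.
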
@@ -3496,14 +3632,43 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_altivec_dss:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
case PPC::BI__builtin_tbegin:
- case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
- case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
+ case PPC::BI__builtin_tend:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
+ SemaFeatureCheck(*this, TheCall, "htm",
+ diag::err_ppc_builtin_requires_htm);
+ case PPC::BI__builtin_tsr:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
+ SemaFeatureCheck(*this, TheCall, "htm",
+ diag::err_ppc_builtin_requires_htm);
case PPC::BI__builtin_tabortwc:
- case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
+ case PPC::BI__builtin_tabortdc:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
+ SemaFeatureCheck(*this, TheCall, "htm",
+ diag::err_ppc_builtin_requires_htm);
case PPC::BI__builtin_tabortwci:
case PPC::BI__builtin_tabortdci:
- return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ return SemaFeatureCheck(*this, TheCall, "htm",
+ diag::err_ppc_builtin_requires_htm) ||
+ (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31));
+ case PPC::BI__builtin_tabort:
+ case PPC::BI__builtin_tcheck:
+ case PPC::BI__builtin_treclaim:
+ case PPC::BI__builtin_trechkpt:
+ case PPC::BI__builtin_tendall:
+ case PPC::BI__builtin_tresume:
+ case PPC::BI__builtin_tsuspend:
+ case PPC::BI__builtin_get_texasr:
+ case PPC::BI__builtin_get_texasru:
+ case PPC::BI__builtin_get_tfhar:
+ case PPC::BI__builtin_get_tfiar:
+ case PPC::BI__builtin_set_texasr:
+ case PPC::BI__builtin_set_texasru:
+ case PPC::BI__builtin_set_tfhar:
+ case PPC::BI__builtin_set_tfiar:
+ case PPC::BI__builtin_ttest:
+ return SemaFeatureCheck(*this, TheCall, "htm",
+ diag::err_ppc_builtin_requires_htm);
// According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
// __builtin_(un)pack_longdouble are available only if long double uses IBM
// extended double representation.
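Net effect of the added `SemaFeatureCheck` calls, sketched: every HTM builtin now requires the 'htm' target feature before any constant-argument checking, e.g. when building with `-mhtm` for a PowerPC target:

    unsigned start_tx(void) {
      return __builtin_tbegin(0); // without -mhtm: err_ppc_builtin_requires_htm
    }
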
@@ -5819,8 +5984,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
? 0
: 1);
- if (ValType->isExtIntType()) {
- Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
+ if (ValType->isBitIntType()) {
+ Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
return ExprError();
}
@@ -6217,11 +6382,11 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// gracefully.
TheCall->setType(ResultType);
- // Prohibit use of _ExtInt with atomic builtins.
- // The arguments would have already been converted to the first argument's
- // type, so only need to check the first argument.
- const auto *ExtIntValType = ValType->getAs<ExtIntType>();
- if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
+ // Prohibit problematic uses of bit-precise integer types with atomic
+ // builtins. The arguments would have already been converted to the first
+ // argument's type, so only need to check the first argument.
+ const auto *BitIntValType = ValType->getAs<BitIntType>();
+ if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
return ExprError();
}
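Sketch of what the renamed checks reject for bit-precise integer operands:

    _BitInt(32) B;
    _BitInt(17) Odd;
    // __atomic/__c11_atomic builtins on B are rejected with
    // err_atomic_builtin_bit_int_prohibit; the __sync family additionally
    // rejects non-power-of-two widths such as Odd's 17 bits.
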
@@ -11249,7 +11414,7 @@ struct IntRange {
false/*NonNegative*/);
}
- if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ if (const auto *EIT = dyn_cast<BitIntType>(T))
return IntRange(EIT->getNumBits(), EIT->isUnsigned());
const BuiltinType *BT = cast<BuiltinType>(T);
@@ -11275,7 +11440,7 @@ struct IntRange {
if (const EnumType *ET = dyn_cast<EnumType>(T))
T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
- if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ if (const auto *EIT = dyn_cast<BitIntType>(T))
return IntRange(EIT->getNumBits(), EIT->isUnsigned());
const BuiltinType *BT = cast<BuiltinType>(T);
@@ -16697,26 +16862,19 @@ static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
return false;
}
-bool Sema::SemaBuiltinElementwiseMathOneArg(CallExpr *TheCall) {
+bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
if (checkArgCount(*this, TheCall, 1))
return true;
ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
- SourceLocation ArgLoc = TheCall->getArg(0)->getBeginLoc();
if (A.isInvalid())
return true;
TheCall->setArg(0, A.get());
QualType TyA = A.get()->getType();
- if (checkMathBuiltinElementType(*this, ArgLoc, TyA))
- return true;
- QualType EltTy = TyA;
- if (auto *VecTy = EltTy->getAs<VectorType>())
- EltTy = VecTy->getElementType();
- if (EltTy->isUnsignedIntegerType())
- return Diag(ArgLoc, diag::err_builtin_invalid_arg_type)
- << 1 << /*signed integer or float ty*/ 3 << TyA;
+ if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
+ return true;
TheCall->setType(TyA);
return false;
@@ -16752,7 +16910,7 @@ bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
return false;
}
-bool Sema::SemaBuiltinReduceMath(CallExpr *TheCall) {
+bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
if (checkArgCount(*this, TheCall, 1))
return true;
@@ -16761,14 +16919,6 @@ bool Sema::SemaBuiltinReduceMath(CallExpr *TheCall) {
return true;
TheCall->setArg(0, A.get());
- const VectorType *TyA = A.get()->getType()->getAs<VectorType>();
- if (!TyA) {
- SourceLocation ArgLoc = TheCall->getArg(0)->getBeginLoc();
- return Diag(ArgLoc, diag::err_builtin_invalid_arg_type)
- << 1 << /* vector ty*/ 4 << A.get()->getType();
- }
-
- TheCall->setType(TyA->getElementType());
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index 083a67db7a91..93c07ccc891f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -569,7 +569,6 @@ void PreferredTypeBuilder::enterMemAccess(Sema &S, SourceLocation Tok,
return;
// Keep the expected type, only update the location.
ExpectedLoc = Tok;
- return;
}
void PreferredTypeBuilder::enterUnary(Sema &S, SourceLocation Tok,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index b999b08d1662..e89cecd08cca 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -237,9 +237,9 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
// placeholder type shall not be a coroutine."
if (FD->getReturnType()->isUndeducedType())
DiagInvalid(DiagAutoRet);
- // [dcl.fct.def.coroutine]p1: "The parameter-declaration-clause of the
- // coroutine shall not terminate with an ellipsis that is not part of a
- // parameter-declaration."
+ // [dcl.fct.def.coroutine]p1
+ // The parameter-declaration-clause of the coroutine shall not terminate with
+ // an ellipsis that is not part of a parameter-declaration.
if (FD->isVariadic())
DiagInvalid(DiagVarargs);
@@ -579,8 +579,12 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
/*TopLevelOfInitList=*/false,
/*TreatUnavailableAsInvalid=*/false);
- // Attempt to initialize the promise type with the arguments.
- // If that fails, fall back to the promise type's default constructor.
+ // [dcl.fct.def.coroutine]5.7
+ // promise-constructor-arguments is determined as follows: overload
+ // resolution is performed on a promise constructor call created by
+  // assembling an argument list q_1 ... q_n. If a viable constructor is
+  // found ([over.match.viable]), then promise-constructor-arguments is
+  // (q_1, ..., q_n); otherwise promise-constructor-arguments is empty.
if (InitSeq) {
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
if (Result.isInvalid()) {
@@ -648,6 +652,10 @@ static void checkNoThrow(Sema &S, const Stmt *E,
return;
}
if (ThrowingDecls.empty()) {
+ // [dcl.fct.def.coroutine]p15
+  // The expression co_await promise.final_suspend() shall not be
+ // potentially-throwing ([except.spec]).
+ //
// First time seeing an error, emit the error message.
S.Diag(cast<FunctionDecl>(S.CurContext)->getLocation(),
diag::err_coroutine_promise_final_suspend_requires_nothrow);
@@ -995,9 +1003,8 @@ static Expr *buildStdNoThrowDeclRef(Sema &S, SourceLocation Loc) {
LookupResult Result(S, &S.PP.getIdentifierTable().get("nothrow"), Loc,
Sema::LookupOrdinaryName);
if (!S.LookupQualifiedName(Result, Std)) {
- // FIXME: <coroutine> should have been included already.
- // If we require it to include <new> then this diagnostic is no longer
- // needed.
+      // <coroutine> is not required to include <new>, so we cannot omit
+      // the check here.
S.Diag(Loc, diag::err_implicit_coroutine_std_nothrow_type_not_found);
return nullptr;
}
@@ -1029,9 +1036,21 @@ static FunctionDecl *findDeleteForPromise(Sema &S, SourceLocation Loc,
auto *PointeeRD = PromiseType->getAsCXXRecordDecl();
assert(PointeeRD && "PromiseType must be a CxxRecordDecl type");
+ // [dcl.fct.def.coroutine]p12
+ // The deallocation function's name is looked up by searching for it in the
+ // scope of the promise type. If nothing is found, a search is performed in
+ // the global scope.
if (S.FindDeallocationFunction(Loc, PointeeRD, DeleteName, OperatorDelete))
return nullptr;
+ // FIXME: We didn't implement following selection:
+ // [dcl.fct.def.coroutine]p12
+ // If both a usual deallocation function with only a pointer parameter and a
+ // usual deallocation function with both a pointer parameter and a size
+ // parameter are found, then the selected deallocation function shall be the
+ // one with two parameters. Otherwise, the selected deallocation function
+ // shall be the function with one parameter.
+
if (!OperatorDelete) {
// Look for a global declaration.
const bool CanProvideSize = S.isCompleteType(Loc, PromiseType);
@@ -1062,8 +1081,8 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
return;
}
- // Coroutines [stmt.return]p1:
- // A return statement shall not appear in a coroutine.
+ // [stmt.return.coroutine]p1:
+ // A coroutine shall not enclose a return statement ([stmt.return]).
if (Fn->FirstReturnLoc.isValid()) {
assert(Fn->FirstCoroutineStmtLoc.isValid() &&
"first coroutine location not set");
@@ -1164,12 +1183,15 @@ bool CoroutineStmtBuilder::makeReturnOnAllocFailure() {
assert(!IsPromiseDependentType &&
"cannot make statement while the promise type is dependent");
- // [dcl.fct.def.coroutine]/8
- // The unqualified-id get_return_object_on_allocation_failure is looked up in
- // the scope of class P by class member access lookup (3.4.5). ...
- // If an allocation function returns nullptr, ... the coroutine return value
- // is obtained by a call to ... get_return_object_on_allocation_failure().
-
+ // [dcl.fct.def.coroutine]p10
+  // If a search for the name get_return_object_on_allocation_failure in
+ // the scope of the promise type ([class.member.lookup]) finds any
+ // declarations, then the result of a call to an allocation function used to
+ // obtain storage for the coroutine state is assumed to return nullptr if it
+ // fails to obtain storage, ... If the allocation function returns nullptr,
+ // ... and the return value is obtained by a call to
+  // T::get_return_object_on_allocation_failure(), where T is the
+ // promise type.
DeclarationName DN =
S.PP.getIdentifierInfo("get_return_object_on_allocation_failure");
LookupResult Found(S, DN, Loc, Sema::LookupMemberName);
@@ -1215,12 +1237,11 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
const bool RequiresNoThrowAlloc = ReturnStmtOnAllocFailure != nullptr;
- // [dcl.fct.def.coroutine]/7
- // Lookup allocation functions using a parameter list composed of the
- // requested size of the coroutine state being allocated, followed by
- // the coroutine function's arguments. If a matching allocation function
- // exists, use it. Otherwise, use an allocation function that just takes
- // the requested size.
+  // According to [dcl.fct.def.coroutine]p9, look up allocation functions using a
+ // parameter list composed of the requested size of the coroutine state being
+ // allocated, followed by the coroutine function's arguments. If a matching
+ // allocation function exists, use it. Otherwise, use an allocation function
+ // that just takes the requested size.
FunctionDecl *OperatorNew = nullptr;
FunctionDecl *OperatorDelete = nullptr;
@@ -1228,21 +1249,32 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
bool PassAlignment = false;
SmallVector<Expr *, 1> PlacementArgs;
- // [dcl.fct.def.coroutine]/7
- // "The allocation function’s name is looked up in the scope of P.
- // [...] If the lookup finds an allocation function in the scope of P,
- // overload resolution is performed on a function call created by assembling
- // an argument list. The first argument is the amount of space requested,
- // and has type std::size_t. The lvalues p1 ... pn are the succeeding
- // arguments."
+ // [dcl.fct.def.coroutine]p9
+ // An implementation may need to allocate additional storage for a
+ // coroutine.
+ // This storage is known as the coroutine state and is obtained by calling a
+ // non-array allocation function ([basic.stc.dynamic.allocation]). The
+ // allocation function's name is looked up by searching for it in the scope of
+ // the promise type.
+ // - If any declarations are found, overload resolution is performed on a
+ // function call created by assembling an argument list. The first argument is
+ // the amount of space requested, and has type std::size_t. The
+ // lvalues p1 ... pn are the succeeding arguments.
//
// ...where "p1 ... pn" are defined earlier as:
//
- // [dcl.fct.def.coroutine]/3
- // "For a coroutine f that is a non-static member function, let P1 denote the
- // type of the implicit object parameter (13.3.1) and P2 ... Pn be the types
- // of the function parameters; otherwise let P1 ... Pn be the types of the
- // function parameters. Let p1 ... pn be lvalues denoting those objects."
+ // [dcl.fct.def.coroutine]p3
+ // The promise type of a coroutine is `std::coroutine_traits<R, P1, ...,
+ // Pn>`
+ // , where R is the return type of the function, and `P1, ..., Pn` are the
+ // sequence of types of the non-object function parameters, preceded by the
+ // type of the object parameter ([dcl.fct]) if the coroutine is a non-static
+  // member function.
+  // [dcl.fct.def.coroutine]p4
+  // In the following, p_i is an
+ // lvalue of type P_i, where p1 denotes the object parameter and p_i+1 denotes
+ // the i-th non-object function parameter for a non-static member function,
+ // and p_i denotes the i-th function parameter otherwise. For a non-static
+ // member function, q_1 is an lvalue that denotes *this; any other q_i is an
+ // lvalue that denotes the parameter copy corresponding to p_i.
if (auto *MD = dyn_cast<CXXMethodDecl>(&FD)) {
if (MD->isInstance() && !isLambdaCallOperator(MD)) {
ExprResult ThisExpr = S.ActOnCXXThis(Loc);
@@ -1273,10 +1305,10 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
/*isArray*/ false, PassAlignment, PlacementArgs,
OperatorNew, UnusedResult, /*Diagnose*/ false);
- // [dcl.fct.def.coroutine]/7
- // "If no matching function is found, overload resolution is performed again
- // on a function call created by passing just the amount of space required as
- // an argument of type std::size_t."
+ // [dcl.fct.def.coroutine]p9
+ // If no viable function is found ([over.match.viable]), overload resolution
+ // is performed again on a function call created by passing just the amount of
+ // space required as an argument of type std::size_t.
if (!OperatorNew && !PlacementArgs.empty()) {
PlacementArgs.clear();
S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
@@ -1285,10 +1317,11 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
OperatorNew, UnusedResult, /*Diagnose*/ false);
}
- // [dcl.fct.def.coroutine]/7
- // "The allocation function’s name is looked up in the scope of P. If this
- // lookup fails, the allocation function’s name is looked up in the global
- // scope."
+ // [dcl.fct.def.coroutine]p9
+ // The allocation function's name is looked up by searching for it in the
+ // scope of the promise type.
+ // - If any declarations are found, ...
+ // - Otherwise, a search is performed in the global scope.
if (!OperatorNew) {
S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Global,
/*DeleteScope*/ Sema::AFS_Both, PromiseType,
@@ -1328,8 +1361,12 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
}
}
- if ((OperatorDelete = findDeleteForPromise(S, Loc, PromiseType)) == nullptr)
+ if ((OperatorDelete = findDeleteForPromise(S, Loc, PromiseType)) == nullptr) {
+ // FIXME: We should add an error here. According to:
+ // [dcl.fct.def.coroutine]p12
+ // If no usual deallocation function is found, the program is ill-formed.
return false;
+ }
Expr *FramePtr =
S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_frame, {});
@@ -1368,7 +1405,11 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
SmallVector<Expr *, 2> DeleteArgs{CoroFree};
- // Check if we need to pass the size.
+ // [dcl.fct.def.coroutine]p12
+ // The selected deallocation function shall be called with the address of
+ // the block of storage to be reclaimed as its first argument. If a
+ // deallocation function with a parameter of type std::size_t is
+ // used, the size of the block is passed as the corresponding argument.
const auto *OpDeleteType =
OpDeleteQualType.getTypePtr()->castAs<FunctionProtoType>();
if (OpDeleteType->getNumParams() > 1)
@@ -1391,9 +1432,13 @@ bool CoroutineStmtBuilder::makeOnFallthrough() {
assert(!IsPromiseDependentType &&
"cannot make statement while the promise type is dependent");
- // [dcl.fct.def.coroutine]/4
- // The unqualified-ids 'return_void' and 'return_value' are looked up in
- // the scope of class P. If both are found, the program is ill-formed.
+ // [dcl.fct.def.coroutine]/p6
+  // If searches for the names return_void and return_value in the scope of
+  // the promise type each find any declarations, the program is ill-formed.
+  // [Note 1: If return_void is found, flowing off the end of a coroutine is
+  // equivalent to a co_return with no operand. Otherwise, flowing off the end
+ // of a coroutine results in undefined behavior ([stmt.return.coroutine]). —
+ // end note]
bool HasRVoid, HasRValue;
LookupResult LRVoid =
lookupMember(S, "return_void", PromiseRecordDecl, Loc, HasRVoid);
@@ -1414,18 +1459,20 @@ bool CoroutineStmtBuilder::makeOnFallthrough() {
<< LRValue.getLookupName();
return false;
} else if (!HasRVoid && !HasRValue) {
- // FIXME: The PDTS currently specifies this case as UB, not ill-formed.
- // However we still diagnose this as an error since until the PDTS is fixed.
- S.Diag(FD.getLocation(),
- diag::err_coroutine_promise_requires_return_function)
- << PromiseRecordDecl;
- S.Diag(PromiseRecordDecl->getLocation(), diag::note_defined_here)
- << PromiseRecordDecl;
- return false;
+ // We need to set 'Fallthrough'. Otherwise the other analysis part might
+ // think the coroutine has defined a return_value method. So it might emit
+ // **false** positive warning. e.g.,
+ //
+ // promise_without_return_func foo() {
+ // co_await something();
+ // }
+ //
+ // Then AnalysisBasedWarning would emit a warning about `foo()` lacking a
+    // co_return statement, which isn't correct.
+ Fallthrough = S.ActOnNullStmt(PromiseRecordDecl->getLocation());
+ if (Fallthrough.isInvalid())
+ return false;
} else if (HasRVoid) {
- // If the unqualified-id return_void is found, flowing off the end of a
- // coroutine is equivalent to a co_return with no operand. Otherwise,
- // flowing off the end of a coroutine results in undefined behavior.
Fallthrough = S.BuildCoreturnStmt(FD.getLocation(), nullptr,
/*IsImplicit*/false);
Fallthrough = S.ActOnFinishFullStmt(Fallthrough.get());
@@ -1481,8 +1528,9 @@ bool CoroutineStmtBuilder::makeOnException() {
}
bool CoroutineStmtBuilder::makeReturnObject() {
- // Build implicit 'p.get_return_object()' expression and form initialization
- // of return type from it.
+ // [dcl.fct.def.coroutine]p7
+  // The expression promise.get_return_object() is used to initialize the
+ // returned reference or prvalue result object of a call to a coroutine.
ExprResult ReturnObject =
buildPromiseCall(S, Fn.CoroutinePromise, Loc, "get_return_object", None);
if (ReturnObject.isInvalid())
@@ -1620,6 +1668,12 @@ bool Sema::buildCoroutineParameterMoves(SourceLocation Loc) {
if (!ScopeInfo->CoroutineParameterMoves.empty())
return false;
+ // [dcl.fct.def.coroutine]p13
+ // When a coroutine is invoked, after initializing its parameters
+ // ([expr.call]), a copy is created for each coroutine parameter. For a
+ // parameter of type cv T, the copy is a variable of type cv T with
+ // automatic storage duration that is direct-initialized from an xvalue of
+ // type T referring to the parameter.
for (auto *PD : FD->parameters()) {
if (PD->getType()->isDependentType())
continue;
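Conceptually, the moves built by this loop behave like the following sketch
(an ordinary function is used for illustration; the real copies live in the
coroutine frame):

    #include <string>
    #include <utility>

    void conceptual_parameter_copy(std::string s) {
      // A variable of the same cv-qualified type, direct-initialized from an
      // xvalue referring to the parameter, destroyed with the frame.
      std::string frame_copy(std::move(s));
      (void)frame_copy;
    }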
@@ -1636,7 +1690,9 @@ bool Sema::buildCoroutineParameterMoves(SourceLocation Loc) {
CExpr = castForMoving(*this, PDRefExpr.get());
else
CExpr = PDRefExpr.get();
-
+ // [dcl.fct.def.coroutine]p13
+ // The initialization and destruction of each parameter copy occurs in the
+ // context of the called coroutine.
auto D = buildVarDecl(*this, Loc, PD->getType(), PD->getIdentifier());
AddInitializerToDecl(D, CExpr, /*DirectInit=*/true);
@@ -1661,43 +1717,53 @@ ClassTemplateDecl *Sema::lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc,
NamespaceDecl *&Namespace) {
if (!StdCoroutineTraitsCache) {
- NamespaceDecl *CoroNamespace = getStdNamespace();
- LookupResult Result(*this, &PP.getIdentifierTable().get("coroutine_traits"),
- FuncLoc, LookupOrdinaryName);
-
- if (!CoroNamespace || !LookupQualifiedName(Result, CoroNamespace)) {
- /// Look up in namespace std::experimental, for compatibility.
- /// TODO: Remove this extra lookup when <experimental/coroutine> is
- /// removed.
- CoroNamespace = lookupStdExperimentalNamespace();
- if (!CoroNamespace || !LookupQualifiedName(Result, CoroNamespace)) {
- Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
- << "std::coroutine_traits";
- return nullptr;
- }
+ // Because coroutines moved from std::experimental in the TS to std in
+ // C++20, we look in both places to give users time to transition their
+ // TS-specific code to C++20. Diagnostics are given when the TS usage is
+ // discovered.
+ // TODO: Become stricter when <experimental/coroutine> is removed.
+
+ auto const &TraitIdent = PP.getIdentifierTable().get("coroutine_traits");
+
+ NamespaceDecl *StdSpace = getStdNamespace();
+ LookupResult ResStd(*this, &TraitIdent, FuncLoc, LookupOrdinaryName);
+ bool InStd = StdSpace && LookupQualifiedName(ResStd, StdSpace);
+
+ NamespaceDecl *ExpSpace = lookupStdExperimentalNamespace();
+ LookupResult ResExp(*this, &TraitIdent, FuncLoc, LookupOrdinaryName);
+ bool InExp = ExpSpace && LookupQualifiedName(ResExp, ExpSpace);
+
+ if (!InStd && !InExp) {
+      // Neither lookup found anything: coroutine_traits is missing entirely.
+ Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
+ << "std::coroutine_traits";
+ return nullptr;
+ }
+
+ if (!InStd) {
+ // Found only in std::experimental.
Diag(KwLoc, diag::warn_deprecated_coroutine_namespace)
<< "coroutine_traits";
- } else {
- /// When we found coroutine_traits in std namespace. Make sure there is no
- /// misleading definition in std::experimental namespace.
- NamespaceDecl *ExpNamespace = lookupStdExperimentalNamespace();
- LookupResult ExpResult(*this,
- &PP.getIdentifierTable().get("coroutine_traits"),
- FuncLoc, LookupOrdinaryName);
- if (ExpNamespace && LookupQualifiedName(ExpResult, ExpNamespace)) {
- Diag(KwLoc,
- diag::err_mixed_use_std_and_experimental_namespace_for_coroutine);
- return nullptr;
- }
+ } else if (InExp) {
+ // Found in std and std::experimental.
+ Diag(KwLoc,
+ diag::err_mixed_use_std_and_experimental_namespace_for_coroutine);
+ Diag(KwLoc, diag::warn_deprecated_coroutine_namespace)
+ << "coroutine_traits";
+ return nullptr;
}
+ // Prefer ::std to std::experimental.
+ auto &Result = InStd ? ResStd : ResExp;
+ CoroTraitsNamespaceCache = InStd ? StdSpace : ExpSpace;
+
+ // coroutine_traits is required to be a class template.
if (!(StdCoroutineTraitsCache = Result.getAsSingle<ClassTemplateDecl>())) {
Result.suppressDiagnostics();
NamedDecl *Found = *Result.begin();
Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
return nullptr;
}
- CoroTraitsNamespaceCache = CoroNamespace;
}
Namespace = CoroTraitsNamespaceCache;
return StdCoroutineTraitsCache;
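From the user's side, the rewritten lookup accepts the C++20 specialization
below silently, accepts a TS-only specialization in std::experimental with a
deprecation warning, and rejects the case where both are visible (the task
types here are hypothetical):

    #include <coroutine>

    struct task;
    struct task_promise;

    template <typename... Args>
    struct std::coroutine_traits<task, Args...> {
      using promise_type = task_promise;
    };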
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 7be71ca49ea2..3c58f1d19c04 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -372,6 +372,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
}
NamedDecl *IIDecl = nullptr;
+ UsingShadowDecl *FoundUsingShadow = nullptr;
switch (Result.getResultKind()) {
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
@@ -441,8 +442,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
(AllowDeducedTemplate && getAsTypeTemplateDecl(RealRes))) {
if (!IIDecl ||
// Make the selection of the recovery decl deterministic.
- RealRes->getLocation() < IIDecl->getLocation())
+ RealRes->getLocation() < IIDecl->getLocation()) {
IIDecl = RealRes;
+ FoundUsingShadow = dyn_cast<UsingShadowDecl>(*Res);
+ }
}
}
@@ -465,6 +468,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
case LookupResult::Found:
IIDecl = Result.getFoundDecl();
+ FoundUsingShadow = dyn_cast<UsingShadowDecl>(*Result.begin());
break;
}
@@ -491,14 +495,20 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
if (!HasTrailingDot)
T = Context.getObjCInterfaceType(IDecl);
+ FoundUsingShadow = nullptr; // FIXME: Target must be a TypeDecl.
} else if (auto *UD = dyn_cast<UnresolvedUsingIfExistsDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(UD, NameLoc);
// Recover with 'int'
T = Context.IntTy;
+ FoundUsingShadow = nullptr;
} else if (AllowDeducedTemplate) {
- if (auto *TD = getAsTypeTemplateDecl(IIDecl))
+ if (auto *TD = getAsTypeTemplateDecl(IIDecl)) {
+ // FIXME: TemplateName should include FoundUsingShadow sugar.
T = Context.getDeducedTemplateSpecializationType(TemplateName(TD),
QualType(), false);
+ // Don't wrap in a further UsingType.
+ FoundUsingShadow = nullptr;
+ }
}
if (T.isNull()) {
@@ -507,6 +517,9 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
return nullptr;
}
+ if (FoundUsingShadow)
+ T = Context.getUsingType(FoundUsingShadow, T);
+
// NOTE: avoid constructing an ElaboratedType(Loc) if this is a
// constructor or destructor name (in such a case, the scope specifier
// will be attached to the enclosing Expr or Decl node).
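A minimal sketch of the sugar this preserves: the declaration below now gets
a UsingType that remembers the using-declaration the name was found through,
rather than desugaring straight to the underlying record type:

    namespace lib {
    struct Widget {};
    }

    using lib::Widget; // introduces a UsingShadowDecl

    Widget w; // type of 'w' is sugared as 'Widget', not 'lib::Widget'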
@@ -843,21 +856,6 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
return false;
}
-/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
-static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
- QualType T, SourceLocation NameLoc) {
- ASTContext &Context = S.Context;
-
- TypeLocBuilder Builder;
- Builder.pushTypeSpec(T).setNameLoc(NameLoc);
-
- T = S.getElaboratedType(ETK_None, SS, T);
- ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
- return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
-}
-
Sema::NameClassification Sema::ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
@@ -1134,14 +1132,28 @@ Corrected:
: NameClassification::TypeTemplate(Template);
}
+ auto BuildTypeFor = [&](TypeDecl *Type, NamedDecl *Found) {
+ QualType T = Context.getTypeDeclType(Type);
+ if (const auto *USD = dyn_cast<UsingShadowDecl>(Found))
+ T = Context.getUsingType(USD, T);
+
+ if (SS.isEmpty()) // No elaborated type, trivial location info
+ return ParsedType::make(T);
+
+ TypeLocBuilder Builder;
+ Builder.pushTypeSpec(T).setNameLoc(NameLoc);
+ T = getElaboratedType(ETK_None, SS, T);
+ ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+ };
+
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
- QualType T = Context.getTypeDeclType(Type);
- if (SS.isNotEmpty())
- return buildNestedType(*this, SS, T, NameLoc);
- return ParsedType::make(T);
+ return BuildTypeFor(Type, *Result.begin());
}
ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
@@ -1190,10 +1202,7 @@ Corrected:
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
TypeDecl *Type = Result.getAsSingle<TypeDecl>();
DiagnoseUseOfDecl(Type, NameLoc);
- QualType T = Context.getTypeDeclType(Type);
- if (SS.isNotEmpty())
- return buildNestedType(*this, SS, T, NameLoc);
- return ParsedType::make(T);
+ return BuildTypeFor(Type, *Result.begin());
}
// If we already know which single declaration is referenced, just annotate
@@ -8504,7 +8513,14 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
- << NewFDisConst << FD->getSourceRange().getEnd();
+ << NewFDisConst << FD->getSourceRange().getEnd()
+ << (NewFDisConst
+ ? FixItHint::CreateRemoval(ExtraArgs.D.getFunctionTypeInfo()
+ .getConstQualifierLoc())
+ : FixItHint::CreateInsertion(ExtraArgs.D.getFunctionTypeInfo()
+ .getRParenLoc()
+ .getLocWithOffset(1),
+ " const"));
} else
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
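A sketch of code the new fix-it fires on; the suggested edit inserts
" const" after the ')' of the definition (or removes the qualifier in the
opposite case):

    struct S {
      int f() const; // declared const...
    };

    int S::f() { return 0; } // ...defined non-const: close-const-match note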
@@ -9195,6 +9211,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< Name << RemoveRange
<< FixItHint::CreateRemoval(RemoveRange)
<< FixItHint::CreateInsertion(InsertLoc, "<>");
+ Invalid = true;
}
}
} else {
@@ -15359,7 +15376,7 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
if (BT->isInteger())
return false;
- if (T->isExtIntType())
+ if (T->isBitIntType())
return false;
return Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
@@ -18277,7 +18294,7 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
// Emit one note for each of the remaining enum constants with
// the same value.
- for (auto *ECD : llvm::make_range(Vec->begin() + 1, Vec->end()))
+ for (auto *ECD : llvm::drop_begin(*Vec))
S.Diag(ECD->getLocation(), diag::note_duplicate_element)
<< ECD << toString(ECD->getInitVal(), 10)
<< ECD->getSourceRange();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index 4df8687aff89..b6bd2e69629d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -1001,6 +1001,84 @@ public:
};
}
+static void handleDiagnoseAsBuiltinAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ const auto *DeclFD = cast<FunctionDecl>(D);
+
+ if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(DeclFD))
+ if (!MethodDecl->isStatic()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_no_member_function) << AL;
+ return;
+ }
+
+ auto DiagnoseType = [&](unsigned Index, AttributeArgumentNType T) {
+ SourceLocation Loc = [&]() {
+ auto Union = AL.getArg(Index - 1);
+ if (Union.is<Expr *>())
+ return Union.get<Expr *>()->getBeginLoc();
+ return Union.get<IdentifierLoc *>()->Loc;
+ }();
+
+ S.Diag(Loc, diag::err_attribute_argument_n_type) << AL << Index << T;
+ };
+
+ FunctionDecl *AttrFD = [&]() -> FunctionDecl * {
+ if (!AL.isArgExpr(0))
+ return nullptr;
+ auto *F = dyn_cast_or_null<DeclRefExpr>(AL.getArgAsExpr(0));
+ if (!F)
+ return nullptr;
+ return dyn_cast_or_null<FunctionDecl>(F->getFoundDecl());
+ }();
+
+ if (!AttrFD || !AttrFD->getBuiltinID(true)) {
+ DiagnoseType(1, AANT_ArgumentBuiltinFunction);
+ return;
+ }
+
+ if (AttrFD->getNumParams() != AL.getNumArgs() - 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments_for)
+ << AL << AttrFD << AttrFD->getNumParams();
+ return;
+ }
+
+ SmallVector<unsigned, 8> Indices;
+
+ for (unsigned I = 1; I < AL.getNumArgs(); ++I) {
+ if (!AL.isArgExpr(I)) {
+ DiagnoseType(I + 1, AANT_ArgumentIntegerConstant);
+ return;
+ }
+
+ const Expr *IndexExpr = AL.getArgAsExpr(I);
+ uint32_t Index;
+
+ if (!checkUInt32Argument(S, AL, IndexExpr, Index, I + 1, false))
+ return;
+
+ if (Index > DeclFD->getNumParams()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_bounds_for_function)
+ << AL << Index << DeclFD << DeclFD->getNumParams();
+ return;
+ }
+
+ QualType T1 = AttrFD->getParamDecl(I - 1)->getType();
+ QualType T2 = DeclFD->getParamDecl(Index - 1)->getType();
+
+ if (T1.getCanonicalType().getUnqualifiedType() !=
+ T2.getCanonicalType().getUnqualifiedType()) {
+ S.Diag(IndexExpr->getBeginLoc(), diag::err_attribute_parameter_types)
+ << AL << Index << DeclFD << T2 << I << AttrFD << T1;
+ return;
+ }
+
+ Indices.push_back(Index - 1);
+ }
+
+ D->addAttr(::new (S.Context) DiagnoseAsBuiltinAttr(
+ S.Context, AL, AttrFD, Indices.data(), Indices.size()));
+}
+
static void handleDiagnoseIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), diag::ext_clang_diagnose_if);
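The attribute handled above lets a library function borrow the compile-time
diagnostics of a builtin; a sketch of its intended use, with each trailing
index mapping a builtin parameter to the 1-based parameter of the annotated
function it corresponds to:

    #include <stddef.h>

    void *my_memcpy(void *dst, const void *src, size_t n)
        __attribute__((diagnose_as_builtin(__builtin_memcpy, 1, 2, 3)));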
@@ -4502,7 +4580,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
return;
}
bool IntegralOrAnyEnumType = (OldElemTy->isIntegralOrEnumerationType() &&
- !OldElemTy->isExtIntType()) ||
+ !OldElemTy->isBitIntType()) ||
OldElemTy->getAs<EnumType>();
if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType() &&
@@ -8159,6 +8237,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_DiagnoseIf:
handleDiagnoseIfAttr(S, D, AL);
break;
+ case ParsedAttr::AT_DiagnoseAsBuiltin:
+ handleDiagnoseAsBuiltinAttr(S, D, AL);
+ break;
case ParsedAttr::AT_NoBuiltin:
handleNoBuiltinAttr(S, D, AL);
break;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index 2658e9698688..01f0079198c7 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -8435,9 +8435,6 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
DefaultedComparisonKind DCK) {
assert(DCK != DefaultedComparisonKind::None && "not a defaulted comparison");
- CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalDeclContext());
- assert(RD && "defaulted comparison is not defaulted in a class");
-
// Perform any unqualified lookups we're going to need to default this
// function.
if (S) {
@@ -8455,43 +8452,17 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// const C&, or
// -- a friend of C having two parameters of type const C& or two
// parameters of type C.
- QualType ExpectedParmType1 = Context.getRecordType(RD);
- QualType ExpectedParmType2 =
- Context.getLValueReferenceType(ExpectedParmType1.withConst());
- if (isa<CXXMethodDecl>(FD))
- ExpectedParmType1 = ExpectedParmType2;
- for (const ParmVarDecl *Param : FD->parameters()) {
- if (!Param->getType()->isDependentType() &&
- !Context.hasSameType(Param->getType(), ExpectedParmType1) &&
- !Context.hasSameType(Param->getType(), ExpectedParmType2)) {
- // Don't diagnose an implicit 'operator=='; we will have diagnosed the
- // corresponding defaulted 'operator<=>' already.
- if (!FD->isImplicit()) {
- Diag(FD->getLocation(), diag::err_defaulted_comparison_param)
- << (int)DCK << Param->getType() << ExpectedParmType1
- << !isa<CXXMethodDecl>(FD)
- << ExpectedParmType2 << Param->getSourceRange();
- }
- return true;
- }
- }
- if (FD->getNumParams() == 2 &&
- !Context.hasSameType(FD->getParamDecl(0)->getType(),
- FD->getParamDecl(1)->getType())) {
- if (!FD->isImplicit()) {
- Diag(FD->getLocation(), diag::err_defaulted_comparison_param_mismatch)
- << (int)DCK
- << FD->getParamDecl(0)->getType()
- << FD->getParamDecl(0)->getSourceRange()
- << FD->getParamDecl(1)->getType()
- << FD->getParamDecl(1)->getSourceRange();
- }
- return true;
- }
- // ... non-static const member ...
- if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalDeclContext());
+ bool IsMethod = isa<CXXMethodDecl>(FD);
+ if (IsMethod) {
+ auto *MD = cast<CXXMethodDecl>(FD);
assert(!MD->isStatic() && "comparison function cannot be a static member");
+
+ // If we're out-of-class, this is the class we're comparing.
+ if (!RD)
+ RD = MD->getParent();
+
if (!MD->isConst()) {
SourceLocation InsertLoc;
if (FunctionTypeLoc Loc = MD->getFunctionTypeLoc())
@@ -8500,7 +8471,7 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// corresponding defaulted 'operator<=>' already.
if (!MD->isImplicit()) {
Diag(MD->getLocation(), diag::err_defaulted_comparison_non_const)
- << (int)DCK << FixItHint::CreateInsertion(InsertLoc, " const");
+ << (int)DCK << FixItHint::CreateInsertion(InsertLoc, " const");
}
// Add the 'const' to the type to recover.
@@ -8510,9 +8481,98 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
}
- } else {
- // A non-member function declared in a class must be a friend.
+ }
+
+ if (FD->getNumParams() != (IsMethod ? 1 : 2)) {
+ // Let's not worry about using a variadic template pack here -- who would do
+ // such a thing?
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_num_args)
+ << int(IsMethod) << int(DCK);
+ return true;
+ }
+
+ const ParmVarDecl *KnownParm = nullptr;
+ for (const ParmVarDecl *Param : FD->parameters()) {
+ QualType ParmTy = Param->getType();
+ if (ParmTy->isDependentType())
+ continue;
+ if (!KnownParm) {
+ auto CTy = ParmTy;
+ // Is it `T const &`?
+ bool Ok = !IsMethod;
+ QualType ExpectedTy;
+ if (RD)
+ ExpectedTy = Context.getRecordType(RD);
+ if (auto *Ref = CTy->getAs<ReferenceType>()) {
+ CTy = Ref->getPointeeType();
+ if (RD)
+ ExpectedTy.addConst();
+ Ok = true;
+ }
+
+ // Is T a class?
+ if (!Ok) {
+ } else if (RD) {
+ if (!RD->isDependentType() && !Context.hasSameType(CTy, ExpectedTy))
+ Ok = false;
+ } else if (auto *CRD = CTy->getAsRecordDecl()) {
+ RD = cast<CXXRecordDecl>(CRD);
+ } else {
+ Ok = false;
+ }
+
+ if (Ok) {
+ KnownParm = Param;
+ } else {
+ // Don't diagnose an implicit 'operator=='; we will have diagnosed the
+ // corresponding defaulted 'operator<=>' already.
+ if (!FD->isImplicit()) {
+ if (RD) {
+ QualType PlainTy = Context.getRecordType(RD);
+ QualType RefTy =
+ Context.getLValueReferenceType(PlainTy.withConst());
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_param)
+ << int(DCK) << ParmTy << RefTy << int(!IsMethod) << PlainTy
+ << Param->getSourceRange();
+ } else {
+ assert(!IsMethod && "should know expected type for method");
+ Diag(FD->getLocation(),
+ diag::err_defaulted_comparison_param_unknown)
+ << int(DCK) << ParmTy << Param->getSourceRange();
+ }
+ }
+ return true;
+ }
+ } else if (!Context.hasSameType(KnownParm->getType(), ParmTy)) {
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_param_mismatch)
+ << int(DCK) << KnownParm->getType() << KnownParm->getSourceRange()
+ << ParmTy << Param->getSourceRange();
+ return true;
+ }
+ }
+
+ assert(RD && "must have determined class");
+ if (IsMethod) {
+ } else if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ // In-class, must be a friend decl.
assert(FD->getFriendObjectKind() && "expected a friend declaration");
+ } else {
+ // Out of class, require the defaulted comparison to be a friend (of a
+ // complete type).
+ if (RequireCompleteType(FD->getLocation(), Context.getRecordType(RD),
+ diag::err_defaulted_comparison_not_friend, int(DCK),
+ int(1)))
+ return true;
+
+ if (llvm::find_if(RD->friends(), [&](const FriendDecl *F) {
+ return FD->getCanonicalDecl() ==
+ F->getFriendDecl()->getCanonicalDecl();
+ }) == RD->friends().end()) {
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_not_friend)
+ << int(DCK) << int(0) << RD;
+ Diag(RD->getCanonicalDecl()->getLocation(), diag::note_declared_at);
+ return true;
+ }
}
// C++2a [class.eq]p1, [class.rel]p1:
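These rewritten checks are what allow a C++20 comparison to be defaulted
outside its class, which previously hit err_defaulted_comparison_out_of_class;
a sketch of what is now accepted:

    struct S {
      int x;
      friend bool operator==(const S &, const S &);
    };

    // Out-of-class defaulting is permitted because the function is a friend
    // of the complete class determined from its parameter type.
    bool operator==(const S &, const S &) = default;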
@@ -8670,7 +8730,10 @@ void Sema::DefineDefaultedComparison(SourceLocation UseLoc, FunctionDecl *FD,
{
// Build and set up the function body.
- CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getLexicalParent());
+    // The first parameter has type maybe-ref-to maybe-const T; use that to
+    // get the type of the class being compared.
+ auto PT = FD->getParamDecl(0)->getType();
+ CXXRecordDecl *RD = PT.getNonReferenceType()->getAsCXXRecordDecl();
SourceLocation BodyLoc =
FD->getEndLoc().isValid() ? FD->getEndLoc() : FD->getLocation();
StmtResult Body =
@@ -16146,6 +16209,23 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext, ExternLoc,
LangStr->getExprLoc(), Language,
LBraceLoc.isValid());
+
+ /// C++ [module.unit]p7.2.3
+ /// - Otherwise, if the declaration
+ /// - ...
+ /// - ...
+ /// - appears within a linkage-specification,
+ /// it is attached to the global module.
+ ///
+  /// If the declaration is already in a global module fragment, we don't
+  /// need to attach it again.
+ if (getLangOpts().CPlusPlusModules && isCurrentModulePurview()) {
+ Module *GlobalModule =
+ PushGlobalModuleFragment(ExternLoc, /*IsImplicit=*/true);
+ D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
+ D->setLocalOwningModule(GlobalModule);
+ }
+
CurContext->addDecl(D);
PushDeclContext(S, D);
return D;
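From the source side, the new PushGlobalModuleFragment call models cases like
the following (module name illustrative):

    export module M;

    extern "C++" {
    // Appears within a linkage-specification in the module purview, so per
    // [module.unit]p7.2.3 it is attached to the global module, not to M.
    int global_helper();
    }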
@@ -16162,6 +16242,14 @@ Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec);
LSDecl->setRBraceLoc(RBraceLoc);
}
+
+  // If the current global module fragment has no parent, it was not
+  // implicitly created for this LinkageSpec, so we don't need to pop it.
+ if (getLangOpts().CPlusPlusModules && getCurrentModule() &&
+ getCurrentModule()->isGlobalModule() && getCurrentModule()->Parent)
+ PopGlobalModuleFragment();
+
PopDeclContext();
return LinkageSpec;
}
@@ -17155,13 +17243,6 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
return;
}
- if (DefKind.isComparison() &&
- !isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
- Diag(FD->getLocation(), diag::err_defaulted_comparison_out_of_class)
- << (int)DefKind.asComparison();
- return;
- }
-
// Issue compatibility warning. We already warned if the operator is
// 'operator<=>' when parsing the '<=>' token.
if (DefKind.isComparison() &&
@@ -17183,31 +17264,40 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
// that we've marked it as defaulted.
FD->setWillHaveBody(false);
- // If this definition appears within the record, do the checking when
- // the record is complete. This is always the case for a defaulted
- // comparison.
- if (DefKind.isComparison())
- return;
- auto *MD = cast<CXXMethodDecl>(FD);
-
- const FunctionDecl *Primary = FD;
- if (const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern())
- // Ask the template instantiation pattern that actually had the
- // '= default' on it.
- Primary = Pattern;
+ if (DefKind.isComparison()) {
+ // If this comparison's defaulting occurs within the definition of its
+ // lexical class context, we have to do the checking when complete.
+ if (auto const *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalDeclContext()))
+ if (!RD->isCompleteDefinition())
+ return;
+ }
- // If the method was defaulted on its first declaration, we will have
+ // If this member fn was defaulted on its first declaration, we will have
// already performed the checking in CheckCompletedCXXClass. Such a
// declaration doesn't trigger an implicit definition.
- if (Primary->getCanonicalDecl()->isDefaulted())
- return;
+ if (isa<CXXMethodDecl>(FD)) {
+ const FunctionDecl *Primary = FD;
+ if (const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern())
+ // Ask the template instantiation pattern that actually had the
+ // '= default' on it.
+ Primary = Pattern;
+ if (Primary->getCanonicalDecl()->isDefaulted())
+ return;
+ }
- // FIXME: Once we support defining comparisons out of class, check for a
- // defaulted comparison here.
- if (CheckExplicitlyDefaultedSpecialMember(MD, DefKind.asSpecialMember()))
- MD->setInvalidDecl();
- else
- DefineDefaultedFunction(*this, MD, DefaultLoc);
+ if (DefKind.isComparison()) {
+ if (CheckExplicitlyDefaultedComparison(nullptr, FD, DefKind.asComparison()))
+ FD->setInvalidDecl();
+ else
+ DefineDefaultedComparison(DefaultLoc, FD, DefKind.asComparison());
+ } else {
+ auto *MD = cast<CXXMethodDecl>(FD);
+
+ if (CheckExplicitlyDefaultedSpecialMember(MD, DefKind.asSpecialMember()))
+ MD->setInvalidDecl();
+ else
+ DefineDefaultedFunction(*this, MD, DefaultLoc);
+ }
}
static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index b305d4e5b92f..d32b3f217aa0 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -4384,7 +4384,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::ObjCObjectPointer:
case Type::ObjCTypeParam:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
@@ -4443,6 +4443,9 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Decltype:
T = cast<DecltypeType>(Ty)->desugar();
break;
+ case Type::Using:
+ T = cast<UsingType>(Ty)->desugar();
+ break;
case Type::Auto:
case Type::DeducedTemplateSpecialization:
T = cast<DeducedType>(Ty)->getDeducedType();
@@ -8388,9 +8391,10 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// If both operands have arithmetic type, do the usual arithmetic conversions
// to find a common type: C99 6.5.15p3,5.
if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
- // Disallow invalid arithmetic conversions, such as those between ExtInts of
- // different sizes, or between ExtInts and other types.
- if (ResTy.isNull() && (LHSTy->isExtIntType() || RHSTy->isExtIntType())) {
+    // Disallow invalid arithmetic conversions, such as those between
+    // bit-precise integer types of different sizes, or between a bit-precise
+    // integer and another type.
+ if (ResTy.isNull() && (LHSTy->isBitIntType() || RHSTy->isBitIntType())) {
Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
<< LHSTy << RHSTy << LHS.get()->getSourceRange()
<< RHS.get()->getSourceRange();
@@ -10974,7 +10978,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
QualType LHSExprType = LHS.get()->getType();
uint64_t LeftSize = S.Context.getTypeSize(LHSExprType);
- if (LHSExprType->isExtIntType())
+ if (LHSExprType->isBitIntType())
LeftSize = S.Context.getIntWidth(LHSExprType);
else if (LHSExprType->isFixedPointType()) {
auto FXSema = S.Context.getFixedPointSemantics(LHSExprType);
@@ -12260,27 +12264,32 @@ QualType Sema::GetSignedVectorType(QualType V) {
if (isa<ExtVectorType>(VTy)) {
if (TypeSize == Context.getTypeSize(Context.CharTy))
return Context.getExtVectorType(Context.CharTy, VTy->getNumElements());
- else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ if (TypeSize == Context.getTypeSize(Context.ShortTy))
return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements());
- else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ if (TypeSize == Context.getTypeSize(Context.IntTy))
return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
- else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ if (TypeSize == Context.getTypeSize(Context.Int128Ty))
+ return Context.getExtVectorType(Context.Int128Ty, VTy->getNumElements());
+ if (TypeSize == Context.getTypeSize(Context.LongTy))
return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
"Unhandled vector element size in vector compare");
return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
}
+ if (TypeSize == Context.getTypeSize(Context.Int128Ty))
+ return Context.getVectorType(Context.Int128Ty, VTy->getNumElements(),
+ VectorType::GenericVector);
if (TypeSize == Context.getTypeSize(Context.LongLongTy))
return Context.getVectorType(Context.LongLongTy, VTy->getNumElements(),
VectorType::GenericVector);
- else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ if (TypeSize == Context.getTypeSize(Context.LongTy))
return Context.getVectorType(Context.LongTy, VTy->getNumElements(),
VectorType::GenericVector);
- else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ if (TypeSize == Context.getTypeSize(Context.IntTy))
return Context.getVectorType(Context.IntTy, VTy->getNumElements(),
VectorType::GenericVector);
- else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ if (TypeSize == Context.getTypeSize(Context.ShortTy))
return Context.getVectorType(Context.ShortTy, VTy->getNumElements(),
VectorType::GenericVector);
assert(TypeSize == Context.getTypeSize(Context.CharTy) &&
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index d25f329f85e4..54f0242d2ca1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -1346,7 +1346,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// implicitly capturing the *enclosing object* by reference (see loop
// above)).
assert((!ByCopy ||
- dyn_cast<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
+ isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
"Only a lambda can capture the enclosing object (referred to by "
"*this) by copy");
QualType ThisTy = getCurrentThisType();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index 0711e6d89383..635e93ba8460 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -2935,7 +2935,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
case Type::ExtVector:
case Type::ConstantMatrix:
case Type::Complex:
- case Type::ExtInt:
+ case Type::BitInt:
break;
// Non-deduced auto types only get here for error cases.
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index af95b1a93cc4..a4b9f3c242c1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -68,15 +68,8 @@ Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
// We start in the global module; all those declarations are implicitly
// module-private (though they do not have module linkage).
- auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- auto *GlobalModule = Map.createGlobalModuleFragmentForModuleUnit(ModuleLoc);
- assert(GlobalModule && "module creation should not fail");
-
- // Enter the scope of the global module.
- ModuleScopes.push_back({});
- ModuleScopes.back().BeginLoc = ModuleLoc;
- ModuleScopes.back().Module = GlobalModule;
- VisibleModules.setVisible(GlobalModule, ModuleLoc);
+ Module *GlobalModule =
+ PushGlobalModuleFragment(ModuleLoc, /*IsImplicit=*/false);
// All declarations created from now on are owned by the global module.
auto *TU = Context.getTranslationUnitDecl();
@@ -390,11 +383,18 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
if (!ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, Import);
- // Re-export the module if needed.
if (!ModuleScopes.empty() && ModuleScopes.back().ModuleInterface) {
+ // Re-export the module if the imported module is exported.
+    // Note that we don't need to add the re-exported module to the Imports
+    // field, since `Exports` implies the module is imported already.
if (ExportLoc.isValid() || getEnclosingExportDecl(Import))
getCurrentModule()->Exports.emplace_back(Mod, false);
+ else
+ getCurrentModule()->Imports.insert(Mod);
} else if (ExportLoc.isValid()) {
+ // [module.interface]p1:
+ // An export-declaration shall inhabit a namespace scope and appear in the
+ // purview of a module interface unit.
Diag(ExportLoc, diag::err_export_not_in_module_interface);
}
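From the user's side, the branch above distinguishes these situations
(module names illustrative):

    export module A;

    import B;        // recorded in Imports only
    export import C; // recorded in Exports; Exports implies imported

    // Outside a module interface purview, 'export import D;' would instead
    // hit err_export_not_in_module_interface.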
@@ -708,3 +708,26 @@ Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
return D;
}
+
+Module *Sema::PushGlobalModuleFragment(SourceLocation BeginLoc,
+ bool IsImplicit) {
+ ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
+ Module *GlobalModule =
+ Map.createGlobalModuleFragmentForModuleUnit(BeginLoc, getCurrentModule());
+ assert(GlobalModule && "module creation should not fail");
+
+ // Enter the scope of the global module.
+ ModuleScopes.push_back({BeginLoc, GlobalModule,
+ /*ModuleInterface=*/false,
+ /*ImplicitGlobalModuleFragment=*/IsImplicit,
+ /*VisibleModuleSet*/{}});
+ VisibleModules.setVisible(GlobalModule, BeginLoc);
+
+ return GlobalModule;
+}
+
+void Sema::PopGlobalModuleFragment() {
+ assert(!ModuleScopes.empty() && getCurrentModule()->isGlobalModule() &&
+ "left the wrong module scope, which is not global module fragment");
+ ModuleScopes.pop_back();
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index 22ae5f59d41b..ba0481874577 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -255,14 +255,14 @@ private:
return &Stack.back().first[Size - 1];
}
const SharingMapTy *getTopOfStackOrNull() const {
- return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
+ return const_cast<DSAStackTy &>(*this).getTopOfStackOrNull();
}
SharingMapTy &getTopOfStack() {
assert(!isStackEmpty() && "no current directive");
return *getTopOfStackOrNull();
}
const SharingMapTy &getTopOfStack() const {
- return const_cast<DSAStackTy&>(*this).getTopOfStack();
+ return const_cast<DSAStackTy &>(*this).getTopOfStack();
}
SharingMapTy *getSecondOnStackOrNull() {
@@ -272,7 +272,7 @@ private:
return &Stack.back().first[Size - 2];
}
const SharingMapTy *getSecondOnStackOrNull() const {
- return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
+ return const_cast<DSAStackTy &>(*this).getSecondOnStackOrNull();
}
/// Get the stack element at a certain level (previously returned by
@@ -286,7 +286,7 @@ private:
return Stack.back().first[Level];
}
const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
- return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
+ return const_cast<DSAStackTy &>(*this).getStackElemAtLevel(Level);
}
DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
@@ -354,9 +354,7 @@ public:
const SharingMapTy *Top = getTopOfStackOrNull();
return Top && Top->BodyComplete;
}
- void setBodyComplete() {
- getTopOfStack().BodyComplete = true;
- }
+ void setBodyComplete() { getTopOfStack().BodyComplete = true; }
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
@@ -392,6 +390,7 @@ public:
class ParentDirectiveScope {
DSAStackTy &Self;
bool Active;
+
public:
ParentDirectiveScope(DSAStackTy &Self, bool Activate)
: Self(Self), Active(false) {
@@ -433,8 +432,7 @@ public:
}
/// Marks (or clears) declaration as possibly loop counter.
void resetPossibleLoopCounter(const Decl *D = nullptr) {
- getTopOfStack().PossiblyLoopCounter =
- D ? D->getCanonicalDecl() : D;
+ getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
}
/// Gets the possible loop counter decl.
const Decl *getPossiblyLoopCunter() const {
@@ -631,13 +629,10 @@ public:
}
/// Add requires decl to internal vector
- void addRequiresDecl(OMPRequiresDecl *RD) {
- RequiresDecls.push_back(RD);
- }
+ void addRequiresDecl(OMPRequiresDecl *RD) { RequiresDecls.push_back(RD); }
/// Checks if the defined 'requires' directive has specified type of clause.
- template <typename ClauseType>
- bool hasRequiresDeclWithClause() const {
+ template <typename ClauseType> bool hasRequiresDeclWithClause() const {
return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
return isa<ClauseType>(C);
@@ -680,9 +675,7 @@ public:
/// Returns the location of the first encountered atomic directive in the
/// module.
- SourceLocation getAtomicDirectiveLoc() const {
- return AtomicLocation;
- }
+ SourceLocation getAtomicDirectiveLoc() const { return AtomicLocation; }
// Return previously encountered target region locations.
ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
@@ -706,8 +699,7 @@ public:
}
/// Set default data mapping attribute to Modifier:Kind
void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
- OpenMPDefaultmapClauseKind Kind,
- SourceLocation Loc) {
+ OpenMPDefaultmapClauseKind Kind, SourceLocation Loc) {
DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
DMI.ImplicitBehavior = M;
DMI.SLoc = Loc;
@@ -749,12 +741,10 @@ public:
: getStackElemAtLevel(Level).DefaultAttr;
}
DefaultDataSharingAttributes getDefaultDSA() const {
- return isStackEmpty() ? DSA_unspecified
- : getTopOfStack().DefaultAttr;
+ return isStackEmpty() ? DSA_unspecified : getTopOfStack().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
- return isStackEmpty() ? SourceLocation()
- : getTopOfStack().DefaultAttrLoc;
+ return isStackEmpty() ? SourceLocation() : getTopOfStack().DefaultAttrLoc;
}
OpenMPDefaultmapClauseModifier
getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
@@ -1457,8 +1447,7 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
- Expr *&TaskgroupReductionRef =
- getTopOfStack().TaskgroupReductionRef;
+ Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
@@ -1483,8 +1472,7 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
- Expr *&TaskgroupReductionRef =
- getTopOfStack().TaskgroupReductionRef;
+ Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
@@ -1595,10 +1583,9 @@ static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
ASTContext &Context = SemaRef.getASTContext();
bool IsClassType;
if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
- unsigned Diag = ListItemNotVar
- ? diag::err_omp_const_list_item
- : IsClassType ? diag::err_omp_const_not_mutable_variable
- : diag::err_omp_const_variable;
+ unsigned Diag = ListItemNotVar ? diag::err_omp_const_list_item
+ : IsClassType ? diag::err_omp_const_not_mutable_variable
+ : diag::err_omp_const_variable;
SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
if (!ListItemNotVar && D) {
const VarDecl *VD = dyn_cast<VarDecl>(D);
@@ -1658,8 +1645,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
});
if (IterTarget != end()) {
const_iterator ParentIterTarget = IterTarget + 1;
- for (const_iterator Iter = begin();
- Iter != ParentIterTarget; ++Iter) {
+ for (const_iterator Iter = begin(); Iter != ParentIterTarget; ++Iter) {
if (isOpenMPLocal(VD, Iter)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
@@ -1677,9 +1663,9 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
return DVar;
}
const_iterator End = end();
- if (!SemaRef.isOpenMPCapturedByRef(
- D, std::distance(ParentIterTarget, End),
- /*OpenMPCaptureLevel=*/0)) {
+ if (!SemaRef.isOpenMPCapturedByRef(D,
+ std::distance(ParentIterTarget, End),
+ /*OpenMPCaptureLevel=*/0)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
IterTarget->ConstructLoc);
@@ -1891,9 +1877,7 @@ void Sema::InitDataSharingAttributesStack() {
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
-void Sema::pushOpenMPFunctionRegion() {
- DSAStack->pushFunction();
-}
+void Sema::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
@@ -2070,8 +2054,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, Level,
- [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
- OMPClauseMappableExprCommon::MappableExprComponentListRef
+ [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection,
+ D](OMPClauseMappableExprCommon::MappableExprComponentListRef
MapExprComponents,
OpenMPClauseKind WhereFoundClauseKind) {
// Only the map clause information influences how a variable is
@@ -2248,7 +2232,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
bool OpenMPFound = false;
for (unsigned I = StopAt + 1; I > 0; --I) {
FunctionScopeInfo *FSI = FunctionScopes[I - 1];
- if(!isa<CapturingScopeInfo>(FSI))
+ if (!isa<CapturingScopeInfo>(FSI))
return nullptr;
if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
if (RSI->CapRegionKind == CR_OpenMP) {
@@ -2322,9 +2306,7 @@ void Sema::startOpenMPCXXRangeFor() {
OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
- if (DSAStack->hasExplicitDirective(
- [](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
- Level)) {
+ if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
bool IsTriviallyCopyable =
D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
!D->getType()
@@ -2351,8 +2333,7 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
}
}
if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
- if (DSAStack->getAssociatedLoops() > 0 &&
- !DSAStack->isLoopStarted()) {
+ if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
DSAStack->resetPossibleLoopCounter(D);
DSAStack->loopStart();
return OMPC_private;
@@ -2519,16 +2500,16 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
<< HostDevTy;
return;
}
- if (!LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
- // Diagnose nohost function called during host codegen.
- StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
- Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
- Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
- diag::note_omp_marked_device_type_here)
- << NoHostDevTy;
- }
+ if (!LangOpts.OpenMPIsDevice && DevTy &&
+ *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
+ // Diagnose nohost function called during host codegen.
+ StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
+ OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
+ Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
+ Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
+ diag::note_omp_marked_device_type_here)
+ << NoHostDevTy;
+ }
}
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
@@ -2779,7 +2760,6 @@ public:
std::unique_ptr<CorrectionCandidateCallback> clone() override {
return std::make_unique<VarDeclFilterCCC>(*this);
}
-
};
class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
@@ -3151,9 +3131,10 @@ applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
ML->DeclarationMarkedOpenMPAllocate(VD, A);
}
-Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
- SourceLocation Loc, ArrayRef<Expr *> VarList,
- ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
+Sema::DeclGroupPtrTy
+Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
+ ArrayRef<OMPClause *> Clauses,
+ DeclContext *Owner) {
assert(Clauses.size() <= 2 && "Expected at most two clauses.");
Expr *Alignment = nullptr;
Expr *Allocator = nullptr;
@@ -3500,7 +3481,8 @@ public:
return;
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
// Check the datasharing rules for the expressions in the clauses.
- if (!CS) {
+ if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
+ !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
Visit(CED->getInit());
@@ -3819,6 +3801,10 @@ public:
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
for (OMPClause *C : S->clauses()) {
+ // Skip analysis of arguments of private clauses for task|target
+ // directives.
+ if (isa_and_nonnull<OMPPrivateClause>(C))
+ continue;
// Skip analysis of arguments of implicitly defined firstprivate clause
// for task|target directives.
// Skip analysis of arguments of implicitly defined map clause for target
@@ -3841,6 +3827,18 @@ public:
VisitStmt(S);
}
+ void VisitCallExpr(CallExpr *S) {
+ for (Stmt *C : S->arguments()) {
+ if (C) {
+        // Check implicitly captured variables in task-based directives to
+        // see if they must be firstprivatized.
+ Visit(C);
+ }
+ }
+ if (Expr *Callee = S->getCallee())
+ if (auto *CE = dyn_cast<MemberExpr>(Callee->IgnoreParenImpCasts()))
+ Visit(CE->getBase());
+ }
void VisitStmt(Stmt *S) {
for (Stmt *C : S->children()) {
if (C) {
@@ -5089,8 +5087,8 @@ static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
!isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
!isa<FieldDecl>(ME->getMemberDecl()))) {
if (IsArrayExpr != NoArrayExpr) {
- S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
- << ERange;
+ S.Diag(ELoc, diag::err_omp_expected_base_var_name)
+ << IsArrayExpr << ERange;
} else {
S.Diag(ELoc,
AllowArraySection
@@ -5134,8 +5132,7 @@ static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
"Expected non-dependent context.");
auto AllocateRange =
llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
- llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
- DeclToCopy;
+ llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>> DeclToCopy;
auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
return isOpenMPPrivate(C->getClauseKind());
});
@@ -6018,7 +6015,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
break;
case OMPD_parallel_master:
Res = ActOnOpenMPParallelMasterDirective(ClausesWithImplicit, AStmt,
- StartLoc, EndLoc);
+ StartLoc, EndLoc);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_sections:
@@ -6357,6 +6354,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -7626,10 +7624,10 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
// != with increment is treated as <; != with decrement is treated as >
if (!TestIsLessOp.hasValue())
TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
- if (UB && (IsConstZero ||
- (TestIsLessOp.getValue() ?
- (IsConstNeg || (IsUnsigned && Subtract)) :
- (IsConstPos || (IsUnsigned && !Subtract))))) {
+ if (UB &&
+ (IsConstZero || (TestIsLessOp.getValue()
+ ? (IsConstNeg || (IsUnsigned && Subtract))
+ : (IsConstPos || (IsUnsigned && !Subtract))))) {
SemaRef.Diag(NewStep->getExprLoc(),
diag::err_omp_loop_incr_not_compatible)
<< LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
@@ -8068,11 +8066,13 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
return nullptr;
llvm::APSInt LRes, SRes;
bool IsLowerConst = false, IsStepConst = false;
- if (Optional<llvm::APSInt> Res = Lower->getIntegerConstantExpr(SemaRef.Context)) {
+ if (Optional<llvm::APSInt> Res =
+ Lower->getIntegerConstantExpr(SemaRef.Context)) {
LRes = *Res;
IsLowerConst = true;
}
- if (Optional<llvm::APSInt> Res = Step->getIntegerConstantExpr(SemaRef.Context)) {
+ if (Optional<llvm::APSInt> Res =
+ Step->getIntegerConstantExpr(SemaRef.Context)) {
SRes = *Res;
IsStepConst = true;
}
@@ -8110,7 +8110,8 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
}
llvm::APSInt URes;
bool IsUpperConst = false;
- if (Optional<llvm::APSInt> Res = Upper->getIntegerConstantExpr(SemaRef.Context)) {
+ if (Optional<llvm::APSInt> Res =
+ Upper->getIntegerConstantExpr(SemaRef.Context)) {
URes = *Res;
IsUpperConst = true;
}
@@ -8566,10 +8567,12 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
// TODO: this can be improved by calculating min/max values but not sure that
// it will be very effective.
if (CondDependOnLC || InitDependOnLC)
- return SemaRef.PerformImplicitConversion(
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get(),
- SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
- /*AllowExplicit=*/true).get();
+ return SemaRef
+ .PerformImplicitConversion(
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get(),
+ SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
+ /*AllowExplicit=*/true)
+ .get();
// Try to build LB <op> UB, where <op> is <, >, <=, or >=.
Sema::TentativeAnalysisScope Trap(SemaRef);
@@ -8579,12 +8582,11 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
if (!NewLB.isUsable() || !NewUB.isUsable())
return nullptr;
- ExprResult CondExpr =
- SemaRef.BuildBinOp(S, DefaultLoc,
- TestIsLessOp.getValue() ?
- (TestIsStrictOp ? BO_LT : BO_LE) :
- (TestIsStrictOp ? BO_GT : BO_GE),
- NewLB.get(), NewUB.get());
+ ExprResult CondExpr = SemaRef.BuildBinOp(
+ S, DefaultLoc,
+ TestIsLessOp.getValue() ? (TestIsStrictOp ? BO_LT : BO_LE)
+ : (TestIsStrictOp ? BO_GT : BO_GE),
+ NewLB.get(), NewUB.get());
if (CondExpr.isUsable()) {
if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
SemaRef.Context.BoolTy))
@@ -8804,6 +8806,9 @@ static bool checkOpenMPIterationSpace(
}
assert(((For && For->getBody()) || (CXXFor && CXXFor->getBody())) &&
"No loop body.");
+  // Postpone analysis of range-based for loops in dependent contexts.
+ if (CXXFor && SemaRef.CurContext->isDependentContext())
+ return false;
OpenMPIterationSpaceChecker ISC(SemaRef, SupportsNonRectangular, DSA,
For ? For->getForLoc() : CXXFor->getForLoc());
@@ -9684,9 +9689,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
ExprResult Iter;
// Compute prod
- ExprResult Prod =
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
- for (unsigned int K = Cnt+1; K < NestedLoopCount; ++K)
+ ExprResult Prod = SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
+ for (unsigned int K = Cnt + 1; K < NestedLoopCount; ++K)
Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Prod.get(),
IterSpaces[K].NumIterations);
@@ -9694,8 +9698,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// If there is at least one more inner loop to avoid
// multiplication by 1.
if (Cnt + 1 < NestedLoopCount)
- Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div,
- Acc.get(), Prod.get());
+ Iter =
+ SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, Acc.get(), Prod.get());
else
Iter = Acc;
if (!Iter.isUsable()) {
@@ -9708,12 +9712,11 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Check if there is at least one more inner loop to avoid
// multiplication by 1.
if (Cnt + 1 < NestedLoopCount)
- Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul,
- Iter.get(), Prod.get());
+ Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Iter.get(),
+ Prod.get());
else
Prod = Iter;
- Acc = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Sub,
- Acc.get(), Prod.get());
+ Acc = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Sub, Acc.get(), Prod.get());
// Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
@@ -10000,7 +10003,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
// All associated statements must be '#pragma omp section' except for
// the first one.
- for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
+ for (Stmt *SectionStmt : llvm::drop_begin(S)) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
Diag(SectionStmt->getBeginLoc(),
@@ -10382,7 +10385,7 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
// All associated statements must be '#pragma omp section' except for
// the first one.
- for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
+ for (Stmt *SectionStmt : llvm::drop_begin(S)) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
Diag(SectionStmt->getBeginLoc(),
@@ -10752,7 +10755,6 @@ private:
bool checkBinaryOperation(BinaryOperator *AtomicBinOp, unsigned DiagId = 0,
unsigned NoteId = 0);
};
-} // namespace
bool OpenMPAtomicUpdateChecker::checkBinaryOperation(
BinaryOperator *AtomicBinOp, unsigned DiagId, unsigned NoteId) {
@@ -10913,6 +10915,7 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
}
return ErrorFound != NoError;
}
+} // namespace
StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
@@ -10933,9 +10936,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
OpenMPClauseKind MemOrderKind = OMPC_unknown;
SourceLocation MemOrderLoc;
for (const OMPClause *C : Clauses) {
- if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
- C->getClauseKind() == OMPC_update ||
- C->getClauseKind() == OMPC_capture) {
+ switch (C->getClauseKind()) {
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ case OMPC_capture:
+ case OMPC_compare: {
if (AtomicKind != OMPC_unknown) {
Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
<< SourceRange(C->getBeginLoc(), C->getEndLoc());
@@ -10945,12 +10951,13 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
AtomicKind = C->getClauseKind();
AtomicKindLoc = C->getBeginLoc();
}
+ break;
}
- if (C->getClauseKind() == OMPC_seq_cst ||
- C->getClauseKind() == OMPC_acq_rel ||
- C->getClauseKind() == OMPC_acquire ||
- C->getClauseKind() == OMPC_release ||
- C->getClauseKind() == OMPC_relaxed) {
+ case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed: {
if (MemOrderKind != OMPC_unknown) {
Diag(C->getBeginLoc(), diag::err_omp_several_mem_order_clauses)
<< getOpenMPDirectiveName(OMPD_atomic) << 0
@@ -10961,6 +10968,13 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
MemOrderKind = C->getClauseKind();
MemOrderLoc = C->getBeginLoc();
}
+ break;
+ }
+ // The following clauses are allowed, but we don't need to do anything here.
+ case OMPC_hint:
+ break;
+ default:
+ llvm_unreachable("unknown clause is encountered");
}
}
// OpenMP 5.0, 2.17.7 atomic Construct, Restrictions
@@ -11075,8 +11089,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (ErrorFound != NoError) {
Diag(ErrorLoc, diag::err_omp_atomic_read_not_expression_statement)
<< ErrorRange;
- Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
- << NoteRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write)
+ << ErrorFound << NoteRange;
return StmtError();
}
if (CurContext->isDependentContext())
@@ -11137,8 +11151,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (ErrorFound != NoError) {
Diag(ErrorLoc, diag::err_omp_atomic_write_not_expression_statement)
<< ErrorRange;
- Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
- << NoteRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write)
+ << ErrorFound << NoteRange;
return StmtError();
}
if (CurContext->isDependentContext())
@@ -11154,9 +11168,10 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// x = expr binop x;
OpenMPAtomicUpdateChecker Checker(*this);
if (Checker.checkStatement(
- Body, (AtomicKind == OMPC_update)
- ? diag::err_omp_atomic_update_not_expression_statement
- : diag::err_omp_atomic_not_expression_statement,
+ Body,
+ (AtomicKind == OMPC_update)
+ ? diag::err_omp_atomic_update_not_expression_statement
+ : diag::err_omp_atomic_not_expression_statement,
diag::note_omp_atomic_update))
return StmtError();
if (!CurContext->isDependentContext()) {
@@ -11370,15 +11385,21 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange(Body->getBeginLoc(), Body->getBeginLoc());
ErrorFound = NotACompoundStatement;
}
- if (ErrorFound != NoError) {
- Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement)
- << ErrorRange;
- Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
- return StmtError();
- }
- if (CurContext->isDependentContext())
- UE = V = E = X = nullptr;
}
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
+ return StmtError();
+ }
+ if (CurContext->isDependentContext())
+ UE = V = E = X = nullptr;
+ } else if (AtomicKind == OMPC_compare) {
+ // TODO: For now we emit an error here, and in emitOMPAtomicExpr we skip
+ // code generation for it.
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "atomic compare is not supported for now");
+ Diag(AtomicKindLoc, DiagID);
}
setFunctionHasBranchProtectedScope();
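For context, OMPC_compare is the OpenMP 5.1 'compare' clause on the atomic construct; a hedged user-level sketch of a form that reaches this branch when OpenMP 5.1 is enabled (Sema accepts the clause, emits the custom error above, and codegen is skipped):

    void atomic_min(int *x, int e) {
      // OpenMP 5.1 conditional update; rejected by this revision for now.
    #pragma omp atomic compare
      if (*x > e) { *x = e; }
    }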
@@ -12283,8 +12304,8 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_parallel_for_simd, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ getOrderedNumberExpr(Clauses), CS, *this, *DSAStack, VarsWithImplicitDSA,
+ B);
if (NestedLoopCount == 0)
return StmtError();
@@ -13459,6 +13480,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -14290,6 +14312,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -14751,6 +14774,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -15056,6 +15080,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -15244,6 +15269,9 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_capture:
Res = ActOnOpenMPCaptureClause(StartLoc, EndLoc);
break;
+ case OMPC_compare:
+ Res = ActOnOpenMPCompareClause(StartLoc, EndLoc);
+ break;
case OMPC_seq_cst:
Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
break;
@@ -15390,6 +15418,11 @@ OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
return new (Context) OMPCaptureClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPCompareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPCompareClause(StartLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
@@ -15858,6 +15891,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -15981,9 +16015,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_private) << Type
<< getOpenMPDirectiveName(CurrDir);
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -16247,9 +16280,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_firstprivate) << Type
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -17433,8 +17465,8 @@ static bool actOnOMPReductionKindClause(
llvm::APInt InitValue =
(BOK != BO_LT) ? IsSigned ? llvm::APInt::getSignedMinValue(Size)
: llvm::APInt::getMinValue(Size)
- : IsSigned ? llvm::APInt::getSignedMaxValue(Size)
- : llvm::APInt::getMaxValue(Size);
+ : IsSigned ? llvm::APInt::getSignedMaxValue(Size)
+ : llvm::APInt::getMaxValue(Size);
Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
if (Type->isPointerType()) {
// Cast to pointer type.
@@ -17845,9 +17877,8 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
!Ty->isIntegralType(Context) && !Ty->isPointerType())) {
Diag(ELoc, diag::err_omp_linear_expected_int_or_ptr) << Type;
if (D) {
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -18059,13 +18090,12 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getBeginLoc(),
/*DiscardedValue*/ false);
- // Build final: Var = InitExpr + NumIterations * Step
+ // Build final: Var = PrivCopy;
ExprResult Final;
if (!Info.first)
- Final =
- buildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), CapturedRef,
- InitExpr, NumIterations, Step, /*Subtract=*/false,
- /*IsNonRectangularLB=*/false);
+ Final = SemaRef.BuildBinOp(
+ S, RefExpr->getExprLoc(), BO_Assign, CapturedRef,
+ SemaRef.DefaultLvalueConversion(*CurPrivate).get());
else
Final = *CurPrivate;
Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getBeginLoc(),
@@ -18124,9 +18154,8 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
Diag(ELoc, diag::err_omp_aligned_expected_array_or_ptr)
<< QType << getLangOpts().CPlusPlus << ERange;
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -18327,9 +18356,8 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_copyprivate) << Type
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -18636,22 +18664,19 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
!RefExpr->isInstantiationDependent() &&
!RefExpr->containsUnexpandedParameterPack() &&
- (OMPDependTFound &&
- DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr())) {
+ (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (OMPDependTFound &&
+ DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr()))) {
Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << 1
- << RefExpr->getSourceRange();
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
continue;
}
auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
- (ASE && !ASE->getBase()->isTypeDependent() &&
- !ASE->getBase()
- ->getType()
- .getNonReferenceType()
- ->isPointerType() &&
- !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
+ if (ASE && !ASE->getBase()->isTypeDependent() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isPointerType() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isArrayType()) {
Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
<< (LangOpts.OpenMP >= 50 ? 1 : 0)
<< (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
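Folding the lvalue test into the first diagnostic changes which message non-lvalue list items get, not whether they are rejected; roughly, under OpenMP 5.0 (names illustrative):

    void producer(int *p, int a) {
    #pragma omp task depend(in : p[0])  // OK: array item, addressable lvalue
      ;
    #pragma omp task depend(in : a + 1) // error: expected addressable lvalue
      ;                                 // expression or array item
    }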
@@ -18934,7 +18959,7 @@ public:
if (!isa<FieldDecl>(ME->getMemberDecl())) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_expected_access_to_data_field)
- << ME->getSourceRange();
+ << ME->getSourceRange();
return false;
}
if (RelevantExpr)
@@ -18950,7 +18975,7 @@ public:
if (FD->isBitField()) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_bit_fields_forbidden_in_clause)
- << ME->getSourceRange() << getOpenMPClauseName(CKind);
+ << ME->getSourceRange() << getOpenMPClauseName(CKind);
return false;
}
if (RelevantExpr)
@@ -18970,7 +18995,7 @@ public:
if (CurType->isUnionType()) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
- << ME->getSourceRange();
+ << ME->getSourceRange();
return false;
}
return RelevantExpr || Visit(E);
@@ -18997,7 +19022,7 @@ public:
if (!E->getType()->isAnyPointerType() && !E->getType()->isArrayType()) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << AE->getSourceRange();
+ << 0 << AE->getSourceRange();
return false;
}
return RelevantExpr || Visit(E);
@@ -19006,8 +19031,7 @@ public:
// If we got an array subscript that expresses the whole dimension, we
// can have any array expressions before it. If it expresses only part of
// the dimension, we can only have unitary-size array expressions.
- if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, AE,
- E->getType()))
+ if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, AE, E->getType()))
AllowWholeSizeArraySection = false;
if (const auto *TE = dyn_cast<CXXThisExpr>(E->IgnoreParenCasts())) {
@@ -19037,7 +19061,7 @@ public:
"Array sections cannot be implicitly mapped.");
Expr *E = OASE->getBase()->IgnoreParenImpCasts();
QualType CurType =
- OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
// If the type of a list item is a reference to a type T then the type
@@ -19049,14 +19073,14 @@ public:
if (!IsPointer && !CurType->isArrayType()) {
SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << OASE->getSourceRange();
+ << 0 << OASE->getSourceRange();
return false;
}
bool NotWhole =
- checkArrayExpressionDoesNotReferToWholeSize(SemaRef, OASE, CurType);
+ checkArrayExpressionDoesNotReferToWholeSize(SemaRef, OASE, CurType);
bool NotUnity =
- checkArrayExpressionDoesNotReferToUnitySize(SemaRef, OASE, CurType);
+ checkArrayExpressionDoesNotReferToUnitySize(SemaRef, OASE, CurType);
if (AllowWholeSizeArraySection) {
// Any array section is currently allowed. Allowing a whole size array
@@ -19079,9 +19103,9 @@ public:
// compatible with the properties of the current array section.
if (NoDiagnose)
return false;
- SemaRef.Diag(
- ELoc, diag::err_array_section_does_not_specify_contiguous_storage)
- << OASE->getSourceRange();
+ SemaRef.Diag(ELoc,
+ diag::err_array_section_does_not_specify_contiguous_storage)
+ << OASE->getSourceRange();
return false;
}
@@ -19180,9 +19204,7 @@ public:
emitErrorMsg();
return false;
}
- const Expr *getFoundBase() const {
- return RelevantExpr;
- }
+ const Expr *getFoundBase() const { return RelevantExpr; }
explicit MapBaseChecker(
Sema &SemaRef, OpenMPClauseKind CKind, OpenMPDirectiveKind DKind,
OMPClauseMappableExprCommon::MappableExprComponentList &Components,
@@ -19193,9 +19215,9 @@ public:
} // namespace
/// Return the expression of the base of the mappable expression or null if it
-/// cannot be determined and do all the necessary checks to see if the expression
-/// is valid as a standalone mappable expression. In the process, record all the
-/// components of the expression.
+/// cannot be determined and do all the necessary checks to see if the
+/// expression is valid as a standalone mappable expression. In the process,
+/// record all the components of the expression.
static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
@@ -19385,9 +19407,9 @@ static bool checkMapConflicts(
return true;
}
if (CI->getAssociatedExpression()->getStmtClass() !=
- SI->getAssociatedExpression()->getStmtClass() ||
- CI->getAssociatedDeclaration()->getCanonicalDecl() ==
- SI->getAssociatedDeclaration()->getCanonicalDecl()) {
+ SI->getAssociatedExpression()->getStmtClass() ||
+ CI->getAssociatedDeclaration()->getCanonicalDecl() ==
+ SI->getAssociatedDeclaration()->getCanonicalDecl()) {
assert(CI != CE && SI != SE);
SemaRef.Diag(DerivedLoc, diag::err_omp_same_pointer_dereferenced)
<< DerivedLoc;
@@ -19608,7 +19630,7 @@ struct MappableVarListInfo {
VarBaseDeclarations.reserve(VarList.size());
}
};
-}
+} // namespace
// Check the validity of the provided variable list for the provided clause kind
// \a CKind. In the check process the valid expressions, mappable expression
@@ -21341,8 +21363,7 @@ OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
// A list item that appears in the inclusive or exclusive clause must appear
// in a reduction clause with the inscan modifier on the enclosing
// worksharing-loop, worksharing-loop SIMD, or simd construct.
- if (DVar.CKind != OMPC_reduction ||
- DVar.Modifier != OMPC_REDUCTION_inscan)
+ if (DVar.CKind != OMPC_reduction || DVar.Modifier != OMPC_REDUCTION_inscan)
Diag(ELoc, diag::err_omp_inclusive_exclusive_not_reduction)
<< RefExpr->getSourceRange();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
index 603611b2d86b..49f7a8d573d5 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
@@ -296,9 +296,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
checkExprMemoryConstraintCompat(*this, OutputExpr, Info, false))
return StmtError();
- // Disallow _ExtInt, since the backends tend to have difficulties with
- // non-normal sizes.
- if (OutputExpr->getType()->isExtIntType())
+ // Disallow bit-precise integer types, since the backends tend to have
+ // difficulties with abnormal sizes.
+ if (OutputExpr->getType()->isBitIntType())
return StmtError(
Diag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_type)
<< OutputExpr->getType() << 0 /*Input*/
@@ -429,7 +429,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
}
}
- if (InputExpr->getType()->isExtIntType())
+ if (InputExpr->getType()->isBitIntType())
return StmtError(
Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_type)
<< InputExpr->getType() << 1 /*Output*/
@@ -924,7 +924,7 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
setFunctionHasBranchProtectedScope();
for (uint64_t I = 0; I < NumOutputs + NumInputs; ++I) {
- if (Exprs[I]->getType()->isExtIntType())
+ if (Exprs[I]->getType()->isBitIntType())
return StmtError(
Diag(Exprs[I]->getBeginLoc(), diag::err_asm_invalid_type)
<< Exprs[I]->getType() << (I < NumOutputs)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index f4fd2ea5aa8e..2482f6d404ea 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -6142,12 +6142,12 @@ bool UnnamedLocalNoLinkageFinder::VisitPipeType(const PipeType* T) {
return false;
}
-bool UnnamedLocalNoLinkageFinder::VisitExtIntType(const ExtIntType *T) {
+bool UnnamedLocalNoLinkageFinder::VisitBitIntType(const BitIntType *T) {
return false;
}
-bool UnnamedLocalNoLinkageFinder::VisitDependentExtIntType(
- const DependentExtIntType *T) {
+bool UnnamedLocalNoLinkageFinder::VisitDependentBitIntType(
+ const DependentBitIntType *T) {
return false;
}
@@ -7089,7 +7089,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
IntegerType = Enum->getDecl()->getIntegerType();
- Value = Value.extOrTrunc(IntegerType->isExtIntType()
+ Value = Value.extOrTrunc(IntegerType->isBitIntType()
? Context.getIntWidth(IntegerType)
: Context.getTypeSize(IntegerType));
@@ -7184,7 +7184,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Coerce the template argument's value to the value it will have
// based on the template parameter's type.
- unsigned AllowedBits = IntegerType->isExtIntType()
+ unsigned AllowedBits = IntegerType->isBitIntType()
? Context.getIntWidth(IntegerType)
: Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index 81edae10335d..e9636d2b942e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -1597,7 +1597,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
- case Type::ExtInt:
+ case Type::BitInt:
return (TDF & TDF_SkipNonDependent) ||
((TDF & TDF_IgnoreQualifiers)
? S.Context.hasSameUnqualifiedType(P, A)
@@ -2144,10 +2144,10 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
return Sema::TDK_NonDeducedMismatch;
}
- case Type::DependentExtInt: {
- const auto *IP = P->castAs<DependentExtIntType>();
+ case Type::DependentBitInt: {
+ const auto *IP = P->castAs<DependentBitIntType>();
- if (const auto *IA = A->getAs<ExtIntType>()) {
+ if (const auto *IA = A->getAs<BitIntType>()) {
if (IP->isUnsigned() != IA->isUnsigned())
return Sema::TDK_NonDeducedMismatch;
@@ -2164,7 +2164,7 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
Deduced);
}
- if (const auto *IA = A->getAs<DependentExtIntType>()) {
+ if (const auto *IA = A->getAs<DependentBitIntType>()) {
if (IP->isUnsigned() != IA->isUnsigned())
return Sema::TDK_NonDeducedMismatch;
return Sema::TDK_Success;
@@ -5949,9 +5949,9 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
cast<DeducedType>(T)->getDeducedType(),
OnlyDeduced, Depth, Used);
break;
- case Type::DependentExtInt:
+ case Type::DependentBitInt:
MarkUsedTemplateParameters(Ctx,
- cast<DependentExtIntType>(T)->getNumBitsExpr(),
+ cast<DependentBitIntType>(T)->getNumBitsExpr(),
OnlyDeduced, Depth, Used);
break;
@@ -5966,7 +5966,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::ObjCObjectPointer:
case Type::UnresolvedUsing:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base)
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index c0bb310e64fb..51c79e93ab0a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -870,7 +870,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_typeofExpr:
case TST_decltype:
- case TST_extint:
+ case TST_bitint:
if (DS.getRepAsExpr() &&
DS.getRepAsExpr()->containsUnexpandedParameterPack())
return true;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index d2ee669debd0..7a038301a249 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -1435,12 +1435,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
}
- case DeclSpec::TST_extint: {
- if (!S.Context.getTargetInfo().hasExtIntType())
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
- << "_ExtInt";
+ case DeclSpec::TST_bitint: {
+ if (!S.Context.getTargetInfo().hasBitIntType())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "_BitInt";
Result =
- S.BuildExtIntType(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned,
+ S.BuildBitIntType(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned,
DS.getRepAsExpr(), DS.getBeginLoc());
if (Result.isNull()) {
Result = Context.IntTy;
@@ -2237,7 +2236,7 @@ QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
return Context.getWritePipeType(T);
}
-/// Build a extended int type.
+/// Build a bit-precise integer type.
///
/// \param IsUnsigned Boolean representing the signedness of the type.
///
@@ -2245,10 +2244,10 @@ QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
/// that.
///
/// \param Loc Location of the keyword.
-QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
+QualType Sema::BuildBitIntType(bool IsUnsigned, Expr *BitWidth,
SourceLocation Loc) {
if (BitWidth->isInstantiationDependent())
- return Context.getDependentExtIntType(IsUnsigned, BitWidth);
+ return Context.getDependentBitIntType(IsUnsigned, BitWidth);
llvm::APSInt Bits(32);
ExprResult ICE =
@@ -2259,22 +2258,22 @@ QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
int64_t NumBits = Bits.getSExtValue();
if (!IsUnsigned && NumBits < 2) {
- Diag(Loc, diag::err_ext_int_bad_size) << 0;
+ Diag(Loc, diag::err_bit_int_bad_size) << 0;
return QualType();
}
if (IsUnsigned && NumBits < 1) {
- Diag(Loc, diag::err_ext_int_bad_size) << 1;
+ Diag(Loc, diag::err_bit_int_bad_size) << 1;
return QualType();
}
if (NumBits > llvm::IntegerType::MAX_INT_BITS) {
- Diag(Loc, diag::err_ext_int_max_size) << IsUnsigned
- << llvm::IntegerType::MAX_INT_BITS;
+ Diag(Loc, diag::err_bit_int_max_size)
+ << IsUnsigned << llvm::IntegerType::MAX_INT_BITS;
return QualType();
}
- return Context.getExtIntType(IsUnsigned, NumBits);
+ return Context.getBitIntType(IsUnsigned, NumBits);
}
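The renamed diagnostics enforce the C23 _BitInt width rules; a hedged illustration of what BuildBitIntType accepts and rejects:

    _BitInt(2) SmallestSigned;            // OK: signed needs at least 2 bits
    unsigned _BitInt(1) SmallestUnsigned; // OK: unsigned needs at least 1 bit
    _BitInt(1) TooNarrow;                 // error: err_bit_int_bad_size
    // Widths above llvm::IntegerType::MAX_INT_BITS draw err_bit_int_max_size.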
/// Check whether the specified array bound can be evaluated using the relevant
@@ -3941,6 +3940,20 @@ static CallingConv getCCForDeclaratorChunk(
break;
}
}
+ } else if (S.getLangOpts().CUDA) {
+ // If we're compiling CUDA/HIP code and targeting SPIR-V we need to make
+ // sure the kernels will be marked with the right calling convention so
+ // that they will be visible to the APIs that ingest SPIR-V.
+ llvm::Triple Triple = S.Context.getTargetInfo().getTriple();
+ if (Triple.getArch() == llvm::Triple::spirv32 ||
+ Triple.getArch() == llvm::Triple::spirv64) {
+ for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) {
+ if (AL.getKind() == ParsedAttr::AT_CUDAGlobal) {
+ CC = CC_OpenCLKernel;
+ break;
+ }
+ }
+ }
}
return CC;
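A hedged sketch of the case the new branch covers: a CUDA/HIP kernel compiled for a spirv32 or spirv64 triple picks up the OpenCL kernel calling convention so SPIR-V consumers can locate it:

    // With a SPIR-V target triple, the __global__ attribute now maps the
    // declarator's calling convention to CC_OpenCLKernel.
    __global__ void fill(int *out) { *out = 42; }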
@@ -6077,11 +6090,11 @@ namespace {
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
}
- void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ void VisitBitIntTypeLoc(BitIntTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
- void VisitDependentExtIntTypeLoc(DependentExtIntTypeLoc TL) {
+ void VisitDependentBitIntTypeLoc(DependentBitIntTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
@@ -6211,7 +6224,7 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Pipe);
TL.setKWLoc(Chunk.Loc);
}
- void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ void VisitBitIntTypeLoc(BitIntTypeLoc TL) {
TL.setNameLoc(Chunk.Loc);
}
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
@@ -6528,7 +6541,6 @@ static void HandleBTFTypeTagAttribute(QualType &Type, const ParsedAttr &Attr,
StringRef BTFTypeTag = StrLiteral->getString();
Type = State.getAttributedType(
::new (Ctx) BTFTypeTagAttr(Ctx, Attr, BTFTypeTag), Type, Type);
- return;
}
/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
@@ -9079,9 +9091,8 @@ QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
else if (!T.isTriviallyCopyableType(Context))
// Some other non-trivially-copyable type (probably a C++ class)
DisallowedKind = 7;
- else if (T->isExtIntType()) {
- DisallowedKind = 8;
- }
+ else if (T->isBitIntType())
+ DisallowedKind = 8;
if (DisallowedKind != -1) {
Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index 7f3326c13263..298a3f7a83d8 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -933,6 +933,11 @@ public:
/// the UnresolvedUsingTypenameDecl was transformed to.
QualType RebuildUnresolvedUsingType(SourceLocation NameLoc, Decl *D);
+ /// Build a new type found via an alias.
+ QualType RebuildUsingType(UsingShadowDecl *Found, QualType Underlying) {
+ return SemaRef.Context.getUsingType(Found, Underlying);
+ }
+
/// Build a new typedef type.
QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
return SemaRef.Context.getTypeDeclType(Typedef);
@@ -1195,12 +1200,12 @@ public:
QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc,
bool isReadPipe);
- /// Build an extended int given its value type.
- QualType RebuildExtIntType(bool IsUnsigned, unsigned NumBits,
+ /// Build a bit-precise int given its value type.
+ QualType RebuildBitIntType(bool IsUnsigned, unsigned NumBits,
SourceLocation Loc);
- /// Build a dependent extended int given its value type.
- QualType RebuildDependentExtIntType(bool IsUnsigned, Expr *NumBitsExpr,
+ /// Build a dependent bit-precise int given its value type.
+ QualType RebuildDependentBitIntType(bool IsUnsigned, Expr *NumBitsExpr,
SourceLocation Loc);
/// Build a new template name given a nested name specifier, a flag
@@ -6072,9 +6077,9 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
return Result;
}
-template<typename Derived> QualType
-TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
- UnresolvedUsingTypeLoc TL) {
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformUnresolvedUsingType(
+ TypeLocBuilder &TLB, UnresolvedUsingTypeLoc TL) {
const UnresolvedUsingType *T = TL.getTypePtr();
Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
if (!D)
@@ -6095,6 +6100,32 @@ TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformUsingType(TypeLocBuilder &TLB,
+ UsingTypeLoc TL) {
+ const UsingType *T = TL.getTypePtr();
+
+ auto *Found = cast_or_null<UsingShadowDecl>(getDerived().TransformDecl(
+ TL.getLocalSourceRange().getBegin(), T->getFoundDecl()));
+ if (!Found)
+ return QualType();
+
+ QualType Underlying = getDerived().TransformType(T->desugar());
+ if (Underlying.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || Found != T->getFoundDecl() ||
+ Underlying != T->getUnderlyingType()) {
+ Result = getDerived().RebuildUsingType(Found, Underlying);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ TLB.pushTypeSpec(Result).setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
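TransformUsingType is new; UsingType is the sugar node introduced alongside this change for types named through a using-declaration. A minimal sketch of the source pattern it models (illustrative only):

    namespace N { struct S {}; }
    using N::S;
    S Value; // 'S' is a UsingType whose found decl is the using-shadow for
             // N::S and which desugars to the underlying RecordType.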
template<typename Derived>
QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
TypedefTypeLoc TL) {
@@ -6430,27 +6461,27 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
}
template <typename Derived>
-QualType TreeTransform<Derived>::TransformExtIntType(TypeLocBuilder &TLB,
- ExtIntTypeLoc TL) {
- const ExtIntType *EIT = TL.getTypePtr();
+QualType TreeTransform<Derived>::TransformBitIntType(TypeLocBuilder &TLB,
+ BitIntTypeLoc TL) {
+ const BitIntType *EIT = TL.getTypePtr();
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild()) {
- Result = getDerived().RebuildExtIntType(EIT->isUnsigned(),
+ Result = getDerived().RebuildBitIntType(EIT->isUnsigned(),
EIT->getNumBits(), TL.getNameLoc());
if (Result.isNull())
return QualType();
}
- ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ BitIntTypeLoc NewTL = TLB.push<BitIntTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
template <typename Derived>
-QualType TreeTransform<Derived>::TransformDependentExtIntType(
- TypeLocBuilder &TLB, DependentExtIntTypeLoc TL) {
- const DependentExtIntType *EIT = TL.getTypePtr();
+QualType TreeTransform<Derived>::TransformDependentBitIntType(
+ TypeLocBuilder &TLB, DependentBitIntTypeLoc TL) {
+ const DependentBitIntType *EIT = TL.getTypePtr();
EnterExpressionEvaluationContext Unevaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
@@ -6463,18 +6494,18 @@ QualType TreeTransform<Derived>::TransformDependentExtIntType(
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || BitsExpr.get() != EIT->getNumBitsExpr()) {
- Result = getDerived().RebuildDependentExtIntType(
+ Result = getDerived().RebuildDependentBitIntType(
EIT->isUnsigned(), BitsExpr.get(), TL.getNameLoc());
if (Result.isNull())
return QualType();
}
- if (isa<DependentExtIntType>(Result)) {
- DependentExtIntTypeLoc NewTL = TLB.push<DependentExtIntTypeLoc>(Result);
+ if (isa<DependentBitIntType>(Result)) {
+ DependentBitIntTypeLoc NewTL = TLB.push<DependentBitIntTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
} else {
- ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ BitIntTypeLoc NewTL = TLB.push<BitIntTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
}
return Result;
@@ -9431,6 +9462,13 @@ TreeTransform<Derived>::TransformOMPCaptureClause(OMPCaptureClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPCompareClause(OMPCompareClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
return C;
@@ -14462,7 +14500,6 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
if (D->isInvalidDecl()) return QualType();
// FIXME: Doesn't account for ObjCInterfaceDecl!
- TypeDecl *Ty;
if (auto *UPD = dyn_cast<UsingPackDecl>(D)) {
// A valid resolved using typename pack expansion decl can have multiple
// UsingDecls, but they must each have exactly one type, and it must be
@@ -14498,17 +14535,18 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
// A valid resolved using typename decl points to exactly one type decl.
assert(++Using->shadow_begin() == Using->shadow_end());
- NamedDecl *Target = Using->shadow_begin()->getTargetDecl();
- if (SemaRef.DiagnoseUseOfDecl(Target, Loc))
+ UsingShadowDecl *Shadow = *Using->shadow_begin();
+ if (SemaRef.DiagnoseUseOfDecl(Shadow->getTargetDecl(), Loc))
return QualType();
- Ty = cast<TypeDecl>(Target);
+ return SemaRef.Context.getUsingType(
+ Shadow, SemaRef.Context.getTypeDeclType(
+ cast<TypeDecl>(Shadow->getTargetDecl())));
} else {
assert(isa<UnresolvedUsingTypenameDecl>(D) &&
"UnresolvedUsingTypenameDecl transformed to non-using decl");
- Ty = cast<UnresolvedUsingTypenameDecl>(D);
+ return SemaRef.Context.getTypeDeclType(
+ cast<UnresolvedUsingTypenameDecl>(D));
}
-
- return SemaRef.Context.getTypeDeclType(Ty);
}
template <typename Derived>
@@ -14557,20 +14595,20 @@ QualType TreeTransform<Derived>::RebuildPipeType(QualType ValueType,
}
template <typename Derived>
-QualType TreeTransform<Derived>::RebuildExtIntType(bool IsUnsigned,
+QualType TreeTransform<Derived>::RebuildBitIntType(bool IsUnsigned,
unsigned NumBits,
SourceLocation Loc) {
llvm::APInt NumBitsAP(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy),
NumBits, true);
IntegerLiteral *Bits = IntegerLiteral::Create(SemaRef.Context, NumBitsAP,
SemaRef.Context.IntTy, Loc);
- return SemaRef.BuildExtIntType(IsUnsigned, Bits, Loc);
+ return SemaRef.BuildBitIntType(IsUnsigned, Bits, Loc);
}
template <typename Derived>
-QualType TreeTransform<Derived>::RebuildDependentExtIntType(
+QualType TreeTransform<Derived>::RebuildDependentBitIntType(
bool IsUnsigned, Expr *NumBitsExpr, SourceLocation Loc) {
- return SemaRef.BuildExtIntType(IsUnsigned, NumBitsExpr, Loc);
+ return SemaRef.BuildBitIntType(IsUnsigned, NumBitsExpr, Loc);
}
template<typename Derived>
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index a033bccbe506..f93e0d2ed1c4 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -6607,6 +6607,10 @@ void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
+void TypeLocReader::VisitUsingTypeLoc(UsingTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
@@ -6772,11 +6776,11 @@ void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) {
TL.setKWLoc(readSourceLocation());
}
-void TypeLocReader::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+void TypeLocReader::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
-void TypeLocReader::VisitDependentExtIntTypeLoc(
- clang::DependentExtIntTypeLoc TL) {
+void TypeLocReader::VisitDependentBitIntTypeLoc(
+ clang::DependentBitIntTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
@@ -11761,6 +11765,9 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_capture:
C = new (Context) OMPCaptureClause();
break;
+ case llvm::omp::OMPC_compare:
+ C = new (Context) OMPCompareClause();
+ break;
case llvm::omp::OMPC_seq_cst:
C = new (Context) OMPSeqCstClause();
break;
@@ -12119,6 +12126,8 @@ void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *C) {
void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
+void OMPClauseReader::VisitOMPCompareClause(OMPCompareClause *) {}
+
void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
void OMPClauseReader::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index 62a31f299d6b..2144befcdb14 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -2948,6 +2948,7 @@ uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset) {
static bool isSameTemplateParameterList(const ASTContext &C,
const TemplateParameterList *X,
const TemplateParameterList *Y);
+static bool isSameEntity(NamedDecl *X, NamedDecl *Y);
/// Determine whether two template parameters are similar enough
/// that they may be used in declarations of the same template.
@@ -2967,7 +2968,9 @@ static bool isSameTemplateParameter(const NamedDecl *X,
if (!TXTC != !TYTC)
return false;
if (TXTC && TYTC) {
- if (TXTC->getNamedConcept() != TYTC->getNamedConcept())
+ auto *NCX = TXTC->getNamedConcept();
+ auto *NCY = TYTC->getNamedConcept();
+ if (!NCX || !NCY || !isSameEntity(NCX, NCY))
return false;
if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
return false;
@@ -3111,11 +3114,12 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
/// Determine whether the two declarations refer to the same entity.
static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
- assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
-
if (X == Y)
return true;
+ if (X->getDeclName() != Y->getDeclName())
+ return false;
+
// Must be in the same context.
//
// Note that we can't use DeclContext::Equals here, because the DeclContexts
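Dropping the name-equality assertion lets callers use isSameEntity as a general comparison, and named concepts in type-constraints are now compared structurally instead of by pointer; a hedged sketch of the case this addresses (the same concept reaching the reader from two modules):

    // Module A and module B both contain, textually identically:
    template <typename T> concept Small = sizeof(T) <= 4;
    // After deserialization the two ConceptDecls are distinct pointers, but
    // isSameEntity(NCX, NCY) can now identify them as the same entity.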
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index a1972f5c6496..65a780e67510 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -396,6 +396,10 @@ void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+void TypeLocWriter::VisitUsingTypeLoc(UsingTypeLoc TL) {
+ Record.AddSourceLocation(TL.getNameLoc());
+}
+
void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
@@ -562,11 +566,11 @@ void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) {
Record.AddSourceLocation(TL.getKWLoc());
}
-void TypeLocWriter::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+void TypeLocWriter::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
-void TypeLocWriter::VisitDependentExtIntTypeLoc(
- clang::DependentExtIntTypeLoc TL) {
+void TypeLocWriter::VisitDependentBitIntTypeLoc(
+ clang::DependentBitIntTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
@@ -6248,6 +6252,8 @@ void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *C) {
void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
+void OMPClauseWriter::VisitOMPCompareClause(OMPCompareClause *) {}
+
void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
void OMPClauseWriter::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 13781b336426..4a56156de4b2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -66,7 +66,8 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
case Builtin::BI__builtin_expect:
case Builtin::BI__builtin_expect_with_probability:
case Builtin::BI__builtin_assume_aligned:
- case Builtin::BI__builtin_addressof: {
+ case Builtin::BI__builtin_addressof:
+ case Builtin::BI__builtin_function_start: {
// For __builtin_unpredictable, __builtin_expect,
// __builtin_expect_with_probability, __builtin_assume_aligned,
// __builtin_addressof, and __builtin_function_start, just return the
// value of the subexpression.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index 8da482a2aec9..6ea39fb95e9a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -56,9 +56,8 @@ private:
void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
CheckerContext &C) const {
- // TODO: For now we only warn about DeclRefExpr, to avoid noise. Warn for
- // calculations also.
- if (!isa<DeclRefExpr>(Cast->IgnoreParenImpCasts()))
+ // Don't warn for implicit conversions to bool.
+ if (Cast->getType()->isBooleanType())
return;
// Don't warn for loss of sign/precision in macros.
@@ -70,6 +69,9 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
const Stmt *Parent = PM.getParent(Cast);
if (!Parent)
return;
+ // Don't warn if this is part of an explicit cast.
+ if (isa<ExplicitCastExpr>(Parent))
+ return;
bool LossOfSign = false;
bool LossOfPrecision = false;
@@ -78,8 +80,10 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
if (const auto *B = dyn_cast<BinaryOperator>(Parent)) {
BinaryOperator::Opcode Opc = B->getOpcode();
if (Opc == BO_Assign) {
- LossOfSign = isLossOfSign(Cast, C);
- LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
+ if (!Cast->IgnoreParenImpCasts()->isEvaluatable(C.getASTContext())) {
+ LossOfSign = isLossOfSign(Cast, C);
+ LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
+ }
} else if (Opc == BO_AddAssign || Opc == BO_SubAssign) {
// No loss of sign.
LossOfPrecision = isLossOfPrecision(Cast, B->getLHS()->getType(), C);
@@ -98,7 +102,12 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
} else if (B->isRelationalOp() || B->isMultiplicativeOp()) {
LossOfSign = isLossOfSign(Cast, C);
}
- } else if (isa<DeclStmt>(Parent)) {
+ } else if (isa<DeclStmt, ReturnStmt>(Parent)) {
+ if (!Cast->IgnoreParenImpCasts()->isEvaluatable(C.getASTContext())) {
+ LossOfSign = isLossOfSign(Cast, C);
+ LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
+ }
+ } else {
LossOfSign = isLossOfSign(Cast, C);
LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
}
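The rewritten guards change which implicit casts the ConversionChecker inspects: conversions to bool, casts nested in explicit casts, and compile-time evaluatable initializers or return values are skipped, while ReturnStmt and other parent contexts are now checked. A hedged sketch (diagnostics paraphrased):

    unsigned char f(int x) {
      if (x < 0)
        return x; // now flagged: ReturnStmt contexts are checked, and the
                  // analyzer knows x is negative here (loss of sign)
      bool b = x; // now ignored: conversions to bool are skipped
      (void)b;
      return 0;
    }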
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index e3f4be0726c8..6e4801aa8e91 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -389,7 +389,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
llvm::raw_string_ostream OS(SBuf);
OS << "Function '" << FuncDecl->getDeclName()
<< "' returns an open handle";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -405,7 +405,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
llvm::raw_string_ostream OS(SBuf);
OS << "Function '" << FuncDecl->getDeclName()
<< "' returns an unowned handle";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -439,7 +439,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
llvm::raw_string_ostream OS(SBuf);
OS << "Handle released through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -453,7 +453,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
llvm::raw_string_ostream OS(SBuf);
OS << "Handle allocated through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -467,7 +467,7 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
llvm::raw_string_ostream OS(SBuf);
OS << "Unowned handle allocated through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
+ return SBuf;
} else
return "";
});
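raw_string_ostream writes through to its target string in this LLVM version, so returning the backing string is equivalent to returning OS.str() and skips a needless flush; the pattern shared by these five lambdas, as a standalone sketch:

    #include "llvm/ADT/StringExtras.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    std::string describeParam(unsigned Idx) {
      std::string Buf;
      llvm::raw_string_ostream OS(Buf);
      OS << "Handle released through " << Idx << llvm::getOrdinalSuffix(Idx)
         << " parameter";
      return Buf; // OS wrote straight into Buf; OS.str() would be redundant
    }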
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
index 6a40f8eda5fa..b4352b450c7f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
@@ -25,8 +25,6 @@ bool isStdSmartPtrCall(const CallEvent &Call);
bool isStdSmartPtr(const CXXRecordDecl *RD);
bool isStdSmartPtr(const Expr *E);
-bool isStdSmartPtr(const CXXRecordDecl *RD);
-
/// Returns whether the smart pointer is null or not.
bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 764dad3e7ab4..ae46709340d3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -762,9 +762,9 @@ void CXXInstanceCall::getInitialStackFrameContents(
QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
- bool Failed;
- ThisVal = StateMgr.getStoreManager().attemptDownCast(ThisVal, Ty, Failed);
- if (Failed) {
+ Optional<SVal> V =
+ StateMgr.getStoreManager().evalBaseToDerived(ThisVal, Ty);
+ if (!V.hasValue()) {
// We might have suffered some sort of placement new earlier, so
// we're constructing in a completely unexpected storage.
// Fall back to a generic pointer cast for this-value.
@@ -772,7 +772,8 @@ void CXXInstanceCall::getInitialStackFrameContents(
const CXXRecordDecl *StaticClass = StaticMD->getParent();
QualType StaticTy = Ctx.getPointerType(Ctx.getRecordType(StaticClass));
ThisVal = SVB.evalCast(ThisVal, Ty, StaticTy);
- }
+ } else
+ ThisVal = *V;
}
if (!ThisVal.isUnknown())
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 69d67cf9b465..637e4edfd778 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -439,14 +439,15 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
- bool Failed = false;
-
- // Check if the value being cast evaluates to 0.
- if (val.isZeroConstant())
- Failed = true;
- // Else, evaluate the cast.
- else
- val = getStoreManager().attemptDownCast(val, T, Failed);
+ bool Failed = true;
+
+ // Check that the value being cast does not evaluate to 0.
+ if (!val.isZeroConstant())
+ if (Optional<SVal> V =
+ StateMgr.getStoreManager().evalBaseToDerived(val, T)) {
+ val = *V;
+ Failed = false;
+ }
if (Failed) {
if (T->isReferenceType()) {
@@ -478,14 +479,13 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
- bool Failed = false;
-
if (!val.isConstant()) {
- val = getStoreManager().attemptDownCast(val, T, Failed);
+ Optional<SVal> V = getStoreManager().evalBaseToDerived(val, T);
+ val = V ? *V : UnknownVal();
}
// Failed to cast or the result is unknown, fall back to conservative.
- if (Failed || val.isUnknown()) {
+ if (val.isUnknown()) {
val =
svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
currBldrCtx->blockCount());
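The same migration as in CallEvent.cpp above: the bool out-parameter of attemptDownCast is replaced by an Optional<SVal> return from evalBaseToDerived. The new calling pattern, as a sketch with placeholder names (castToDerived is hypothetical):

    using namespace clang;
    using namespace ento;

    SVal castToDerived(StoreManager &SM, SVal Base, QualType Ty) {
      if (Optional<SVal> V = SM.evalBaseToDerived(Base, Ty))
        return *V;         // the down-cast is known to succeed
      return UnknownVal(); // conservative fallback, as in VisitCast above
    }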
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 3b847d6f0d87..b4578385a147 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -410,7 +410,7 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
}
// Append files to the main report file in the order they appear in the path
- for (auto I : llvm::make_range(FileIDs.begin() + 1, FileIDs.end())) {
+ for (auto I : llvm::drop_begin(FileIDs)) {
std::string s;
llvm::raw_string_ostream os(s);
@@ -437,7 +437,7 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
for (auto BI : *Buf)
os << BI;
- return os.str();
+ return file;
}
void HTMLDiagnostics::dumpCoverageData(
@@ -534,7 +534,7 @@ document.addEventListener("DOMContentLoaded", function() {
</form>
)<<<";
- return os.str();
+ return s;
}
void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
@@ -1202,7 +1202,7 @@ std::string getSpanBeginForControl(const char *ClassName, unsigned Index) {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << "<span id=\"" << ClassName << Index << "\">";
- return OS.str();
+ return Result;
}
std::string getSpanBeginForControlStart(unsigned Index) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index f77fcb030a15..58bea12f8783 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -444,7 +444,7 @@ std::string MemRegion::getString() const {
std::string s;
llvm::raw_string_ostream os(s);
dumpToStream(os);
- return os.str();
+ return s;
}
void MemRegion::dumpToStream(raw_ostream &os) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 23c67c64f975..4b0d4942e528 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -110,6 +110,14 @@ public:
RangeSet::ContainerType RangeSet::Factory::EmptySet{};
+RangeSet RangeSet::Factory::add(RangeSet LHS, RangeSet RHS) {
+ ContainerType Result;
+ Result.reserve(LHS.size() + RHS.size());
+ std::merge(LHS.begin(), LHS.end(), RHS.begin(), RHS.end(),
+ std::back_inserter(Result));
+ return makePersistent(std::move(Result));
+}
+
RangeSet RangeSet::Factory::add(RangeSet Original, Range Element) {
ContainerType Result;
Result.reserve(Original.size() + 1);
@@ -126,6 +134,186 @@ RangeSet RangeSet::Factory::add(RangeSet Original, const llvm::APSInt &Point) {
return add(Original, Range(Point));
}
+RangeSet RangeSet::Factory::unite(RangeSet LHS, RangeSet RHS) {
+ ContainerType Result = unite(*LHS.Impl, *RHS.Impl);
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::unite(RangeSet Original, Range R) {
+ ContainerType Result;
+ Result.push_back(R);
+ Result = unite(*Original.Impl, Result);
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::unite(RangeSet Original, llvm::APSInt Point) {
+ return unite(Original, Range(ValueFactory.getValue(Point)));
+}
+
+RangeSet RangeSet::Factory::unite(RangeSet Original, llvm::APSInt From,
+ llvm::APSInt To) {
+ return unite(Original,
+ Range(ValueFactory.getValue(From), ValueFactory.getValue(To)));
+}
+
+template <typename T>
+void swapIterators(T &First, T &FirstEnd, T &Second, T &SecondEnd) {
+ std::swap(First, Second);
+ std::swap(FirstEnd, SecondEnd);
+}
+
+RangeSet::ContainerType RangeSet::Factory::unite(const ContainerType &LHS,
+ const ContainerType &RHS) {
+ if (LHS.empty())
+ return RHS;
+ if (RHS.empty())
+ return LHS;
+
+ using llvm::APSInt;
+ using iterator = ContainerType::const_iterator;
+
+ iterator First = LHS.begin();
+ iterator FirstEnd = LHS.end();
+ iterator Second = RHS.begin();
+ iterator SecondEnd = RHS.end();
+ APSIntType Ty = APSIntType(First->From());
+ const APSInt Min = Ty.getMinValue();
+
+ // Handle a corner case first when both range sets start from MIN.
+ // This helps to avoid complicated conditions below. Specifically, thanks
+ // to this check, the `Second->From() - One` operation in the loop below
+ // can never underflow past `MIN`.
+ if (Min == First->From() && Min == Second->From()) {
+ if (First->To() > Second->To()) {
+ // [ First ]--->
+ // [ Second ]----->
+ // MIN^
+ // The Second range is entirely inside the First one.
+
+ // Check if Second is the last in its RangeSet.
+ if (++Second == SecondEnd)
+ // [ First ]--[ First + 1 ]--->
+ // [ Second ]--------------------->
+ // MIN^
+ // The Union is equal to First's RangeSet.
+ return LHS;
+ } else {
+ // case 1: [ First ]----->
+ // case 2: [ First ]--->
+ // [ Second ]--->
+ // MIN^
+ // The First range is entirely inside or equal to the Second one.
+
+ // Check if First is the last in its RangeSet.
+ if (++First == FirstEnd)
+ // [ First ]----------------------->
+ // [ Second ]--[ Second + 1 ]---->
+ // MIN^
+ // The Union is equal to Second's RangeSet.
+ return RHS;
+ }
+ }
+
+ const APSInt One = Ty.getValue(1);
+ ContainerType Result;
+
+ // This is called when there are no ranges left in one of the range sets.
+ // Append the rest of the ranges from the other range set to the Result
+ // and return it.
+ const auto AppendTheRest = [&Result](iterator I, iterator E) {
+ Result.append(I, E);
+ return Result;
+ };
+
+ while (true) {
+ // We want to keep the following invariant at all times:
+ // ---[ First ------>
+ // -----[ Second --->
+ if (First->From() > Second->From())
+ swapIterators(First, FirstEnd, Second, SecondEnd);
+
+ // The Union definitely starts with First->From().
+ // ----------[ First ------>
+ // ------------[ Second --->
+ // ----------[ Union ------>
+ // UnionStart^
+ const llvm::APSInt &UnionStart = First->From();
+
+ // Loop where the invariant holds.
+ while (true) {
+ // Skip all enclosed ranges.
+ // ---[ First ]--->
+ // -----[ Second ]--[ Second + 1 ]--[ Second + N ]----->
+ while (First->To() >= Second->To()) {
+ // Check if Second is the last in its RangeSet.
+ if (++Second == SecondEnd) {
+ // Append the Union.
+ // ---[ Union ]--->
+ // -----[ Second ]----->
+ // --------[ First ]--->
+ // UnionEnd^
+ Result.emplace_back(UnionStart, First->To());
+ // ---[ Union ]----------------->
+ // --------------[ First + 1]--->
+ // Append all remaining ranges from the First's RangeSet.
+ return AppendTheRest(++First, FirstEnd);
+ }
+ }
+
+ // Check if First and Second are disjoint. If so, we have found
+ // the end of the Union. Exit the loop and append the Union.
+ // ---[ First ]=------------->
+ // ------------=[ Second ]--->
+ // ----MinusOne^
+ if (First->To() < Second->From() - One)
+ break;
+
+ // First is entirely inside the Union. Go next.
+ // ---[ Union ----------->
+ // ---- [ First ]-------->
+ // -------[ Second ]----->
+ // Check if First is the last in its RangeSet.
+ if (++First == FirstEnd) {
+ // Append the Union.
+ // ---[ Union ]--->
+ // -----[ First ]------->
+ // --------[ Second ]--->
+ // UnionEnd^
+ Result.emplace_back(UnionStart, Second->To());
+ // ---[ Union ]------------------>
+ // --------------[ Second + 1]--->
+ // Append all remaining ranges from the Second's RangeSet.
+ return AppendTheRest(++Second, SecondEnd);
+ }
+
+ // We know that we are at one of the two cases:
+ // case 1: --[ First ]--------->
+ // case 2: ----[ First ]------->
+ // --------[ Second ]---------->
+ // In both cases First starts after Second->From().
+ // Make sure that the loop invariant holds.
+ swapIterators(First, FirstEnd, Second, SecondEnd);
+ }
+
+ // Here First and Second are disjoint.
+ // Append the Union.
+ // ---[ Union ]--------------->
+ // -----------------[ Second ]--->
+ // ------[ First ]--------------->
+ // UnionEnd^
+ Result.emplace_back(UnionStart, First->To());
+
+ // Check if First is the last in its RangeSet.
+ if (++First == FirstEnd)
+ // ---[ Union ]--------------->
+ // --------------[ Second ]--->
+ // Append all remaining ranges from the Second's RangeSet.
+ return AppendTheRest(Second, SecondEnd);
+ }
+
+ llvm_unreachable("Normally, we should not reach here");
+}
+
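A worked example may make the union easier to follow; overlapping and point-adjacent ranges are fused into one:

    // {[1, 5], [8, 9]} U {[4, 10], [20, 30]} == {[1, 10], [20, 30]}
    //   ([8, 9] is swallowed because it lies inside [4, 10])
    // Point adjacency fuses too, since disjointness requires
    // First->To() < Second->From() - 1:
    // {[1, 2]} U {[3, 4]} == {[1, 4]}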
RangeSet RangeSet::Factory::getRangeSet(Range From) {
ContainerType Result;
Result.push_back(From);
@@ -155,13 +343,6 @@ RangeSet::ContainerType *RangeSet::Factory::construct(ContainerType &&From) {
return new (Buffer) ContainerType(std::move(From));
}
-RangeSet RangeSet::Factory::add(RangeSet LHS, RangeSet RHS) {
- ContainerType Result;
- std::merge(LHS.begin(), LHS.end(), RHS.begin(), RHS.end(),
- std::back_inserter(Result));
- return makePersistent(std::move(Result));
-}
-
const llvm::APSInt &RangeSet::getMinValue() const {
assert(!isEmpty());
return begin()->From();
@@ -325,11 +506,6 @@ RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
const_iterator First = LHS.begin(), Second = RHS.begin(),
FirstEnd = LHS.end(), SecondEnd = RHS.end();
- const auto SwapIterators = [&First, &FirstEnd, &Second, &SecondEnd]() {
- std::swap(First, Second);
- std::swap(FirstEnd, SecondEnd);
- };
-
// If we ran out of ranges in one set, but not in the other,
// it means that those elements are definitely not in the
// intersection.
@@ -339,7 +515,7 @@ RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
// ----[ First ---------------------->
// --------[ Second ----------------->
if (Second->From() < First->From())
- SwapIterators();
+ swapIterators(First, FirstEnd, Second, SecondEnd);
// Loop where the invariant holds:
do {
@@ -373,7 +549,7 @@ RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
if (Second->To() > First->To()) {
// Here we make a decision to keep First as the "longer"
// range.
- SwapIterators();
+ swapIterators(First, FirstEnd, Second, SecondEnd);
}
// At this point, we have the following situation:
@@ -2191,42 +2367,6 @@ LLVM_NODISCARD ProgramStateRef reAssume(ProgramStateRef State,
Constraint->getMaxValue(), true);
}
-// Simplify the given symbol with the help of the SValBuilder. In
-// SValBuilder::simplifySVal, we traverse the symbol tree and query the
-// constraint values for the sub-trees; if a value is a constant, we do
-// constant folding. Compound symbols might collapse to a simpler symbol
-// tree that can be simplified further. Thus, we repeat the simplification on
-// the new symbol tree until we reach the simplest form, i.e. the fixpoint.
-//
-// Consider the following symbol `(b * b) * b * b` which has this tree:
-// *
-// / \
-// * b
-// / \
-// / b
-// (b * b)
-// Now, if the `b * b == 1` new constraint is added then during the first
-// iteration we have the following transformations:
-// * *
-// / \ / \
-// * b --> b b
-// / \
-// / b
-// 1
-// We need another iteration to reach the final result `1`.
-LLVM_NODISCARD
-static SVal simplifyUntilFixpoint(SValBuilder &SVB, ProgramStateRef State,
- const SymbolRef Sym) {
- SVal Val = SVB.makeSymbolVal(Sym);
- SVal SimplifiedVal = SVB.simplifySVal(State, Val);
- // Do the simplification until we can.
- while (SimplifiedVal != Val) {
- Val = SimplifiedVal;
- SimplifiedVal = SVB.simplifySVal(State, Val);
- }
- return SimplifiedVal;
-}
-
// Iterate over all symbols and try to simplify them. Once a symbol is
// simplified then we check if we can merge the simplified symbol's equivalence
// class to this class. This way, we simplify not just the symbols but the
@@ -2238,8 +2378,7 @@ EquivalenceClass::simplify(SValBuilder &SVB, RangeSet::Factory &F,
SymbolSet ClassMembers = Class.getClassMembers(State);
for (const SymbolRef &MemberSym : ClassMembers) {
- const SVal SimplifiedMemberVal =
- simplifyUntilFixpoint(SVB, State, MemberSym);
+ const SVal SimplifiedMemberVal = simplifyToSVal(State, MemberSym);
const SymbolRef SimplifiedMemberSym = SimplifiedMemberVal.getAsSymbol();
// The symbol is collapsed to a constant, check if the current State is
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 71bfc86ab8f7..8edcef319088 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -459,13 +459,23 @@ SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
return evalBinOpLN(state, op, *LV, rhs.castAs<NonLoc>(), type);
}
- if (Optional<Loc> RV = rhs.getAs<Loc>()) {
- // Support pointer arithmetic where the addend is on the left
- // and the pointer on the right.
- assert(op == BO_Add);
+ if (const Optional<Loc> RV = rhs.getAs<Loc>()) {
+ const auto IsCommutative = [](BinaryOperatorKind Op) {
+ return Op == BO_Mul || Op == BO_Add || Op == BO_And || Op == BO_Xor ||
+ Op == BO_Or;
+ };
+
+ if (IsCommutative(op)) {
+ // Swap operands.
+ return evalBinOpLN(state, op, *RV, lhs.castAs<NonLoc>(), type);
+ }
- // Commute the operands.
- return evalBinOpLN(state, op, *RV, lhs.castAs<NonLoc>(), type);
+    // If the right operand is a concrete int location, we have no better
+    // option than to treat it as a simple nonloc.
+ if (auto RV = rhs.getAs<loc::ConcreteInt>()) {
+ const nonloc::ConcreteInt RhsAsLoc = makeIntVal(RV->getValue());
+ return evalBinOpNN(state, op, lhs.castAs<NonLoc>(), RhsAsLoc, type);
+ }
}
return evalBinOpNN(state, op, lhs.castAs<NonLoc>(), rhs.castAs<NonLoc>(),
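The hunk above generalizes the old BO_Add-only operand swap: any commutative operator may have its NonLoc and Loc operands exchanged, and for non-commutative operators a concrete-int location on the right is downgraded to a plain integer. A standalone sketch of that decision logic, using stand-in enums rather than the analyzer's SVal hierarchy:

    #include <cassert>

    enum BinaryOperatorKind { BO_Mul, BO_Div, BO_Add, BO_Sub, BO_And, BO_Xor, BO_Or };
    enum class ValKind { NonLoc, Loc, ConcreteIntLoc };
    enum class Action { SwapAndEvalLN, DowngradeRhsToInt, EvalNN };

    // Swap operands for commutative ops; otherwise, a concrete-int location
    // on the right can be treated as a plain integer; anything else falls
    // through to the NonLoc x NonLoc evaluator.
    Action dispatch(ValKind Lhs, ValKind Rhs, BinaryOperatorKind Op) {
      const bool IsCommutative = Op == BO_Mul || Op == BO_Add || Op == BO_And ||
                                 Op == BO_Xor || Op == BO_Or;
      if (Lhs == ValKind::NonLoc && Rhs != ValKind::NonLoc) {
        if (IsCommutative)
          return Action::SwapAndEvalLN;
        if (Rhs == ValKind::ConcreteIntLoc)
          return Action::DowngradeRhsToInt;
      }
      return Action::EvalNN;
    }

    int main() {
      assert(dispatch(ValKind::NonLoc, ValKind::Loc, BO_Add) ==
             Action::SwapAndEvalLN);
      assert(dispatch(ValKind::NonLoc, ValKind::ConcreteIntLoc, BO_Sub) ==
             Action::DowngradeRhsToInt);
      assert(dispatch(ValKind::NonLoc, ValKind::NonLoc, BO_Sub) ==
             Action::EvalNN);
    }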
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 4ca35dd06ae5..dad8a7b3caae 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -21,6 +21,35 @@ using namespace ento;
namespace {
class SimpleSValBuilder : public SValBuilder {
+
+  // With one `simplifySValOnce` call, a compound symbol might collapse to a
+  // simpler symbol tree that can be simplified further. Thus, we repeat the
+  // simplification on the new symbol tree until we reach the simplest form,
+  // i.e. the fixpoint.
+ // Consider the following symbol `(b * b) * b * b` which has this tree:
+ // *
+ // / \
+ // * b
+ // / \
+ // / b
+ // (b * b)
+ // Now, if the `b * b == 1` new constraint is added then during the first
+ // iteration we have the following transformations:
+ // * *
+ // / \ / \
+ // * b --> b b
+ // / \
+ // / b
+ // 1
+ // We need another iteration to reach the final result `1`.
+ SVal simplifyUntilFixpoint(ProgramStateRef State, SVal Val);
+
+ // Recursively descends into symbolic expressions and replaces symbols
+ // with their known values (in the sense of the getKnownValue() method).
+ // We traverse the symbol tree and query the constraint values for the
+ // sub-trees and if a value is a constant we do the constant folding.
+ SVal simplifySValOnce(ProgramStateRef State, SVal V);
+
public:
SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
ProgramStateManager &stateMgr)
@@ -40,8 +69,6 @@ public:
/// (integer) value, that value is returned. Otherwise, returns NULL.
const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V) override;
- /// Recursively descends into symbolic expressions and replaces symbols
- /// with their known values (in the sense of the getKnownValue() method).
SVal simplifySVal(ProgramStateRef State, SVal V) override;
SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
@@ -1105,7 +1132,20 @@ const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
return nullptr;
}
+SVal SimpleSValBuilder::simplifyUntilFixpoint(ProgramStateRef State, SVal Val) {
+ SVal SimplifiedVal = simplifySValOnce(State, Val);
+ while (SimplifiedVal != Val) {
+ Val = SimplifiedVal;
+ SimplifiedVal = simplifySValOnce(State, Val);
+ }
+ return SimplifiedVal;
+}
+
SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
+ return simplifyUntilFixpoint(State, V);
+}
+
+SVal SimpleSValBuilder::simplifySValOnce(ProgramStateRef State, SVal V) {
// For now, this function tries to constant-fold symbols inside a
// nonloc::SymbolVal, and does nothing else. More simplifications should
// be possible, such as constant-folding an index in an ElementRegion.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
index 05feb1325c93..2bcdb0faf5da 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -314,10 +314,7 @@ static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
return nullptr;
}
-SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
- bool &Failed) {
- Failed = false;
-
+Optional<SVal> StoreManager::evalBaseToDerived(SVal Base, QualType TargetType) {
const MemRegion *MR = Base.getAsRegion();
if (!MR)
return UnknownVal();
@@ -392,7 +389,9 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
}
// We failed if the region we ended up with has perfect type info.
- Failed = isa<TypedValueRegion>(MR);
+ if (isa<TypedValueRegion>(MR))
+ return None;
+
return UnknownVal();
}
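evalBaseToDerived folds the old bool &Failed out-parameter into the return type: None means the downcast definitely failed, while a present-but-Unknown value means the cast could not be resolved. A standalone sketch of that contract, using std::optional as a stand-in for llvm::Optional (all names illustrative):

    #include <iostream>
    #include <optional>

    // Stand-in result: nullopt  -> downcast definitely failed (old Failed=true)
    //                  0        -> unknown (analogous to UnknownVal())
    //                  non-zero -> the resolved derived value
    std::optional<int> evalBaseToDerived(bool PerfectTypeInfo, bool Resolved) {
      if (!Resolved)
        return 0;            // cannot tell; keep analyzing with Unknown
      if (PerfectTypeInfo)
        return std::nullopt; // region type is exact and does not match
      return 42;
    }

    int main() {
      if (auto V = evalBaseToDerived(/*PerfectTypeInfo=*/true, /*Resolved=*/true))
        std::cout << "value: " << *V << '\n';
      else
        std::cout << "downcast failed for sure\n"; // this branch prints
    }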
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f692c68045ee..f6ddcb763f9d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -35,6 +35,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -493,13 +494,11 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
}
}
-static bool isBisonFile(ASTContext &C) {
+static bool fileContainsString(StringRef Substring, ASTContext &C) {
const SourceManager &SM = C.getSourceManager();
FileID FID = SM.getMainFileID();
StringRef Buffer = SM.getBufferOrFake(FID).getBuffer();
- if (Buffer.startswith("/* A Bison parser, made by"))
- return true;
- return false;
+ return Buffer.contains(Substring);
}
void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
@@ -546,38 +545,48 @@ void AnalysisConsumer::reportAnalyzerProgress(StringRef S) {
}
void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
-
// Don't run the actions if an error has occurred with parsing the file.
DiagnosticsEngine &Diags = PP.getDiagnostics();
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
return;
- if (isBisonFile(C)) {
+ // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
+ // FIXME: This should be replaced with something that doesn't rely on
+ // side-effects in PathDiagnosticConsumer's destructor. This is required when
+ // used with option -disable-free.
+ const auto DiagFlusherScopeExit =
+ llvm::make_scope_exit([this] { Mgr.reset(); });
+
+ if (Opts->ShouldIgnoreBisonGeneratedFiles &&
+ fileContainsString("/* A Bison parser, made by", C)) {
reportAnalyzerProgress("Skipping bison-generated file\n");
- } else if (Opts->DisableAllCheckers) {
+ return;
+ }
- // Don't analyze if the user explicitly asked for no checks to be performed
- // on this file.
+ if (Opts->ShouldIgnoreFlexGeneratedFiles &&
+ fileContainsString("/* A lexical scanner generated by flex", C)) {
+ reportAnalyzerProgress("Skipping flex-generated file\n");
+ return;
+ }
+
+ // Don't analyze if the user explicitly asked for no checks to be performed
+ // on this file.
+ if (Opts->DisableAllCheckers) {
reportAnalyzerProgress("All checks are disabled using a supplied option\n");
- } else {
- // Otherwise, just run the analysis.
- runAnalysisOnTranslationUnit(C);
+ return;
}
+ // Otherwise, just run the analysis.
+ runAnalysisOnTranslationUnit(C);
+
// Count how many basic blocks we have not covered.
NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
NumVisitedBlocksInAnalyzedFunctions =
FunctionSummaries.getTotalNumVisitedBasicBlocks();
if (NumBlocksInAnalyzedFunctions > 0)
PercentReachableBlocks =
- (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
+ (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
NumBlocksInAnalyzedFunctions;
-
- // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
- // FIXME: This should be replaced with something that doesn't rely on
- // side-effects in PathDiagnosticConsumer's destructor. This is required when
- // used with option -disable-free.
- Mgr.reset();
}
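The rewrite above replaces the trailing Mgr.reset() with a scope guard so the diagnostics flush runs on every early return (bison/flex skips, disabled checkers) as well as on the normal path. A standalone sketch of the RAII idea behind llvm::make_scope_exit, using a std-only stand-in:

    #include <cstdio>
    #include <utility>

    template <typename Callable> class ScopeExit {
      Callable ExitFn;
    public:
      explicit ScopeExit(Callable F) : ExitFn(std::move(F)) {}
      ~ScopeExit() { ExitFn(); } // runs on every path out of the scope
    };

    void handleTranslationUnit(bool IsBisonFile) {
      ScopeExit Flush([] { std::puts("diagnostics flushed"); });
      if (IsBisonFile)
        return;                  // guard still runs on this early return
      std::puts("analysis ran");
    }                            // ...and on normal exit

    int main() {
      handleTranslationUnit(true);  // flushed only
      handleTranslationUnit(false); // ran, then flushed
    }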
AnalysisConsumer::AnalysisMode
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index f7c711690d7e..acceec690c11 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -9,66 +9,54 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h"
#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
#include "llvm/Support/Threading.h"
using namespace clang;
using namespace tooling;
using namespace dependencies;
-CachedFileSystemEntry CachedFileSystemEntry::createFileEntry(
- StringRef Filename, llvm::vfs::FileSystem &FS, bool Minimize) {
+llvm::ErrorOr<llvm::vfs::Status>
+CachedFileSystemEntry::initFile(StringRef Filename, llvm::vfs::FileSystem &FS) {
// Load the file and its content from the file system.
- llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MaybeFile =
- FS.openFileForRead(Filename);
+ auto MaybeFile = FS.openFileForRead(Filename);
if (!MaybeFile)
return MaybeFile.getError();
- llvm::ErrorOr<llvm::vfs::Status> Stat = (*MaybeFile)->status();
- if (!Stat)
- return Stat.getError();
+ auto File = std::move(*MaybeFile);
+
+ auto MaybeStat = File->status();
+ if (!MaybeStat)
+ return MaybeStat.getError();
+ auto Stat = std::move(*MaybeStat);
- llvm::vfs::File &F = **MaybeFile;
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MaybeBuffer =
- F.getBuffer(Stat->getName());
+ auto MaybeBuffer = File->getBuffer(Stat.getName());
if (!MaybeBuffer)
return MaybeBuffer.getError();
+ auto Buffer = std::move(*MaybeBuffer);
+
+ OriginalContents = std::move(Buffer);
+ return Stat;
+}
+
+void CachedFileSystemEntry::minimizeFile() {
+ assert(OriginalContents && "minimizing missing contents");
llvm::SmallString<1024> MinimizedFileContents;
// Minimize the file down to directives that might affect the dependencies.
- const auto &Buffer = *MaybeBuffer;
SmallVector<minimize_source_to_dependency_directives::Token, 64> Tokens;
- if (!Minimize || minimizeSourceToDependencyDirectives(
- Buffer->getBuffer(), MinimizedFileContents, Tokens)) {
- // Use the original file unless requested otherwise, or
- // if the minimization failed.
- // FIXME: Propage the diagnostic if desired by the client.
- CachedFileSystemEntry Result;
- Result.MaybeStat = std::move(*Stat);
- Result.Contents.reserve(Buffer->getBufferSize() + 1);
- Result.Contents.append(Buffer->getBufferStart(), Buffer->getBufferEnd());
- // Implicitly null terminate the contents for Clang's lexer.
- Result.Contents.push_back('\0');
- Result.Contents.pop_back();
- return Result;
+ if (minimizeSourceToDependencyDirectives(OriginalContents->getBuffer(),
+ MinimizedFileContents, Tokens)) {
+ // FIXME: Propagate the diagnostic if desired by the client.
+ // Use the original file if the minimization failed.
+ MinimizedContentsStorage =
+ llvm::MemoryBuffer::getMemBuffer(*OriginalContents);
+ MinimizedContentsAccess.store(MinimizedContentsStorage.get());
+ return;
}
- CachedFileSystemEntry Result;
- size_t Size = MinimizedFileContents.size();
- Result.MaybeStat = llvm::vfs::Status(Stat->getName(), Stat->getUniqueID(),
- Stat->getLastModificationTime(),
- Stat->getUser(), Stat->getGroup(), Size,
- Stat->getType(), Stat->getPermissions());
// The contents produced by the minimizer must be null terminated.
assert(MinimizedFileContents.data()[MinimizedFileContents.size()] == '\0' &&
"not null terminated contents");
- // Even though there's an implicit null terminator in the minimized contents,
- // we want to temporarily make it explicit. This will ensure that the
- // std::move will preserve it even if it needs to do a copy if the
- // SmallString still has the small capacity.
- MinimizedFileContents.push_back('\0');
- Result.Contents = std::move(MinimizedFileContents);
- // Now make the null terminator implicit again, so that Clang's lexer can find
- // it right where the buffer ends.
- Result.Contents.pop_back();
// Compute the skipped PP ranges that speedup skipping over inactive
// preprocessor blocks.
@@ -86,20 +74,20 @@ CachedFileSystemEntry CachedFileSystemEntry::createFileEntry(
}
Mapping[Range.Offset] = Range.Length;
}
- Result.PPSkippedRangeMapping = std::move(Mapping);
-
- return Result;
-}
-
-CachedFileSystemEntry
-CachedFileSystemEntry::createDirectoryEntry(llvm::vfs::Status &&Stat) {
- assert(Stat.isDirectory() && "not a directory!");
- auto Result = CachedFileSystemEntry();
- Result.MaybeStat = std::move(Stat);
- return Result;
+ PPSkippedRangeMapping = std::move(Mapping);
+
+ MinimizedContentsStorage = std::make_unique<llvm::SmallVectorMemoryBuffer>(
+ std::move(MinimizedFileContents));
+ // The algorithm in `getOrCreateFileSystemEntry` uses the presence of
+ // minimized contents to decide whether an entry is up-to-date or not.
+ // If it is up-to-date, the skipped range mappings must be already computed.
+ // This is why we need to store the minimized contents **after** storing the
+ // skipped range mappings. Failing to do so would lead to a data race.
+ MinimizedContentsAccess.store(MinimizedContentsStorage.get());
}
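The store-ordering comment above describes the classic publish pattern: write all dependent data first, then publish through the atomic pointer, so any reader that observes a non-null pointer also observes the data it depends on. A standalone sketch with illustrative field names:

    #include <atomic>
    #include <cassert>
    #include <string>
    #include <thread>

    struct Entry {
      std::string SkippedRanges;              // plain data, written first
      std::string MinimizedStorage;
      std::atomic<std::string *> Published{nullptr};
    };

    int main() {
      Entry E;
      std::thread Writer([&] {
        E.SkippedRanges = "mappings";         // 1) dependent data
        E.MinimizedStorage = "minimized";     // 2) the contents themselves
        E.Published.store(&E.MinimizedStorage); // 3) publish last
      });
      std::thread Reader([&] {
        while (!E.Published.load())           // pairs with the store above
          std::this_thread::yield();
        assert(E.SkippedRanges == "mappings"); // visible once published
      });
      Writer.join();
      Reader.join();
    }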
-DependencyScanningFilesystemSharedCache::SingleCache::SingleCache() {
+DependencyScanningFilesystemSharedCache::
+ DependencyScanningFilesystemSharedCache() {
  // This heuristic was chosen using empirical testing on a
// reasonably high core machine (iMacPro 18 cores / 36 threads). The cache
// sharding gives a performance edge by reducing the lock contention.
@@ -111,19 +99,13 @@ DependencyScanningFilesystemSharedCache::SingleCache::SingleCache() {
}
DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
-DependencyScanningFilesystemSharedCache::SingleCache::get(StringRef Key) {
+DependencyScanningFilesystemSharedCache::get(StringRef Key) {
CacheShard &Shard = CacheShards[llvm::hash_value(Key) % NumShards];
- std::unique_lock<std::mutex> LockGuard(Shard.CacheLock);
+ std::lock_guard<std::mutex> LockGuard(Shard.CacheLock);
auto It = Shard.Cache.try_emplace(Key);
return It.first->getValue();
}
-DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
-DependencyScanningFilesystemSharedCache::get(StringRef Key, bool Minimized) {
- SingleCache &Cache = Minimized ? CacheMinimized : CacheOriginal;
- return Cache.get(Key);
-}
-
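Both the removed and the consolidated get() select a shard by hashing the key, so concurrent lookups of unrelated files contend on different mutexes. A standalone sketch of the sharding idea (illustrative types; the real entries additionally carry their own ValueLock for use after lookup):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>

    struct ShardedCache {
      static constexpr unsigned NumShards = 8; // real code sizes this by hardware
      struct Shard {
        std::mutex Lock;
        std::map<std::string, std::string> Map;
      } Shards[NumShards];

      std::string &get(const std::string &Key) {
        Shard &S = Shards[std::hash<std::string>{}(Key) % NumShards];
        std::lock_guard<std::mutex> Guard(S.Lock); // only this shard is locked
        return S.Map[Key]; // in the real code the entry has its own lock
      }
    };

    int main() {
      ShardedCache Cache;
      Cache.get("a.h") = "contents";
      std::cout << Cache.get("a.h") << '\n';
    }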
/// Whitelist of file extensions that should be minimized; a file with no
/// extension is treated as a source file and minimized.
///
@@ -134,15 +116,14 @@ static bool shouldMinimizeBasedOnExtension(StringRef Filename) {
if (Ext.empty())
return true; // C++ standard library
return llvm::StringSwitch<bool>(Ext)
- .CasesLower(".c", ".cc", ".cpp", ".c++", ".cxx", true)
- .CasesLower(".h", ".hh", ".hpp", ".h++", ".hxx", true)
- .CasesLower(".m", ".mm", true)
- .CasesLower(".i", ".ii", ".mi", ".mmi", true)
- .CasesLower(".def", ".inc", true)
- .Default(false);
+ .CasesLower(".c", ".cc", ".cpp", ".c++", ".cxx", true)
+ .CasesLower(".h", ".hh", ".hpp", ".h++", ".hxx", true)
+ .CasesLower(".m", ".mm", true)
+ .CasesLower(".i", ".ii", ".mi", ".mmi", true)
+ .CasesLower(".def", ".inc", true)
+ .Default(false);
}
-
static bool shouldCacheStatFailures(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
@@ -167,38 +148,33 @@ bool DependencyScanningWorkerFilesystem::shouldMinimize(StringRef RawFilename) {
return !NotToBeMinimized.contains(Filename);
}
-CachedFileSystemEntry DependencyScanningWorkerFilesystem::createFileSystemEntry(
- llvm::ErrorOr<llvm::vfs::Status> &&MaybeStatus, StringRef Filename,
- bool ShouldMinimize) {
- if (!MaybeStatus)
- return CachedFileSystemEntry(MaybeStatus.getError());
-
- if (MaybeStatus->isDirectory())
- return CachedFileSystemEntry::createDirectoryEntry(std::move(*MaybeStatus));
-
- return CachedFileSystemEntry::createFileEntry(Filename, getUnderlyingFS(),
- ShouldMinimize);
+void CachedFileSystemEntry::init(llvm::ErrorOr<llvm::vfs::Status> &&MaybeStatus,
+ StringRef Filename,
+ llvm::vfs::FileSystem &FS) {
+ if (!MaybeStatus || MaybeStatus->isDirectory())
+ MaybeStat = std::move(MaybeStatus);
+ else
+ MaybeStat = initFile(Filename, FS);
}
-llvm::ErrorOr<const CachedFileSystemEntry *>
+llvm::ErrorOr<EntryRef>
DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
- const StringRef Filename) {
- bool ShouldMinimize = shouldMinimize(Filename);
+ StringRef Filename) {
+ bool ShouldBeMinimized = shouldMinimize(Filename);
- if (const auto *Entry = Cache.getCachedEntry(Filename, ShouldMinimize))
- return Entry;
+ const auto *Entry = LocalCache.getCachedEntry(Filename);
+ if (Entry && !Entry->needsUpdate(ShouldBeMinimized))
+ return EntryRef(ShouldBeMinimized, *Entry);
// FIXME: Handle PCM/PCH files.
// FIXME: Handle module map files.
- DependencyScanningFilesystemSharedCache::SharedFileSystemEntry
- &SharedCacheEntry = SharedCache.get(Filename, ShouldMinimize);
- const CachedFileSystemEntry *Result;
+ auto &SharedCacheEntry = SharedCache.get(Filename);
{
- std::unique_lock<std::mutex> LockGuard(SharedCacheEntry.ValueLock);
+ std::lock_guard<std::mutex> LockGuard(SharedCacheEntry.ValueLock);
CachedFileSystemEntry &CacheEntry = SharedCacheEntry.Value;
- if (!CacheEntry.isValid()) {
+ if (!CacheEntry.isInitialized()) {
auto MaybeStatus = getUnderlyingFS().status(Filename);
if (!MaybeStatus && !shouldCacheStatFailures(Filename))
// HACK: We need to always restat non source files if the stat fails.
@@ -206,27 +182,30 @@ DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
// files before building them, and then looks for them again. If we
// cache the stat failure, it won't see them the second time.
return MaybeStatus.getError();
- CacheEntry = createFileSystemEntry(std::move(MaybeStatus), Filename,
- ShouldMinimize);
+ CacheEntry.init(std::move(MaybeStatus), Filename, getUnderlyingFS());
}
- Result = &CacheEntry;
+ // Checking `needsUpdate` verifies the entry represents an opened file.
+ // Only checking `needsMinimization` could lead to minimization of files
+ // that we failed to load (such files don't have `OriginalContents`).
+ if (CacheEntry.needsUpdate(ShouldBeMinimized))
+ CacheEntry.minimizeFile();
}
// Store the result in the local cache.
- Cache.setCachedEntry(Filename, ShouldMinimize, Result);
- return Result;
+ Entry = &SharedCacheEntry.Value;
+ return EntryRef(ShouldBeMinimized, *Entry);
}
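getOrCreateFileSystemEntry is now a two-level lookup: the per-worker local cache answers without locking, and only a miss (or a stale, not-yet-minimized entry) takes the shared entry's lock. A standalone sketch of the two-level pattern, with illustrative names and the staleness check omitted:

    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>

    struct TwoLevelCache {
      std::map<std::string, const std::string *> Local; // per-worker, lock-free
      static std::mutex SharedLock;
      static std::map<std::string, std::string> Shared; // process-wide

      const std::string &get(const std::string &Key) {
        if (auto It = Local.find(Key); It != Local.end())
          return *It->second;               // fast path, no lock taken
        const std::string *Entry;
        {
          std::lock_guard<std::mutex> G(SharedLock);
          Entry = &Shared.try_emplace(Key, "contents of " + Key).first->second;
        }
        Local[Key] = Entry;                 // remember for next time
        return *Entry;
      }
    };
    std::mutex TwoLevelCache::SharedLock;
    std::map<std::string, std::string> TwoLevelCache::Shared;

    int main() {
      TwoLevelCache C;
      std::cout << C.get("a.h") << '\n';    // takes the shared lock
      std::cout << C.get("a.h") << '\n';    // served from the local cache
    }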
llvm::ErrorOr<llvm::vfs::Status>
DependencyScanningWorkerFilesystem::status(const Twine &Path) {
SmallString<256> OwnedFilename;
StringRef Filename = Path.toStringRef(OwnedFilename);
- const llvm::ErrorOr<const CachedFileSystemEntry *> Result =
- getOrCreateFileSystemEntry(Filename);
+
+ llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
- return (*Result)->getStatus();
+ return Result->getStatus();
}
namespace {
@@ -240,7 +219,7 @@ public:
: Buffer(std::move(Buffer)), Stat(std::move(Stat)) {}
static llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
- create(const CachedFileSystemEntry *Entry,
+ create(EntryRef Entry,
ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings);
llvm::ErrorOr<llvm::vfs::Status> status() override { return Stat; }
@@ -261,21 +240,22 @@ private:
} // end anonymous namespace
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MinimizedVFSFile::create(
- const CachedFileSystemEntry *Entry,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
- if (Entry->isDirectory())
- return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
- std::make_error_code(std::errc::is_a_directory));
- llvm::ErrorOr<StringRef> Contents = Entry->getContents();
+ EntryRef Entry, ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
+ if (Entry.isDirectory())
+ return std::make_error_code(std::errc::is_a_directory);
+
+ llvm::ErrorOr<StringRef> Contents = Entry.getContents();
if (!Contents)
return Contents.getError();
auto Result = std::make_unique<MinimizedVFSFile>(
- llvm::MemoryBuffer::getMemBuffer(*Contents, Entry->getName(),
+ llvm::MemoryBuffer::getMemBuffer(*Contents, Entry.getName(),
/*RequiresNullTerminator=*/false),
- *Entry->getStatus());
- if (!Entry->getPPSkippedRangeMapping().empty() && PPSkipMappings)
- (*PPSkipMappings)[Result->Buffer->getBufferStart()] =
- &Entry->getPPSkippedRangeMapping();
+ *Entry.getStatus());
+
+ const auto *EntrySkipMappings = Entry.getPPSkippedRangeMapping();
+ if (EntrySkipMappings && !EntrySkipMappings->empty() && PPSkipMappings)
+ (*PPSkipMappings)[Result->Buffer->getBufferStart()] = EntrySkipMappings;
+
return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
std::unique_ptr<llvm::vfs::File>(std::move(Result)));
}
@@ -285,8 +265,7 @@ DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
SmallString<256> OwnedFilename;
StringRef Filename = Path.toStringRef(OwnedFilename);
- const llvm::ErrorOr<const CachedFileSystemEntry *> Result =
- getOrCreateFileSystemEntry(Filename);
+ llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
return MinimizedVFSFile::create(Result.get(), PPSkipMappings);
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index 383a850301a1..086215e7a573 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -37,9 +37,13 @@ CompilerInvocation ModuleDepCollector::makeInvocationForModuleBuildWithoutPaths(
CI.getLangOpts()->resetNonModularOptions();
CI.getPreprocessorOpts().resetNonModularOptions();
- // Remove options incompatible with explicit module build.
+  // Remove options that are incompatible with an explicit module build or
+  // that are likely to differ between identical modules discovered from
+  // different translation units.
CI.getFrontendOpts().Inputs.clear();
CI.getFrontendOpts().OutputFile.clear();
+ CI.getCodeGenOpts().MainFileName.clear();
+ CI.getCodeGenOpts().DwarfDebugFlags.clear();
CI.getFrontendOpts().ProgramAction = frontend::GenerateModule;
CI.getLangOpts()->ModuleName = Deps.ID.ModuleName;
@@ -233,7 +237,13 @@ ModuleID ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
.getHeaderSearchInfo()
.getModuleMap()
.getModuleMapFileForUniquing(M);
- MD.ClangModuleMapFile = std::string(ModuleMap ? ModuleMap->getName() : "");
+
+ if (ModuleMap) {
+ StringRef Path = ModuleMap->tryGetRealPathName();
+ if (Path.empty())
+ Path = ModuleMap->getName();
+ MD.ClangModuleMapFile = std::string(Path);
+ }
serialization::ModuleFile *MF =
MDC.ScanInstance.getASTReader()->getModuleManager().lookup(
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
index 8a31e776d030..e2014f965c90 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -927,5 +927,5 @@ std::string TokenBuffer::dumpForTests() const {
M.EndExpanded);
}
}
- return OS.str();
+ return Dump;
}
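This hunk and the Tree.cpp hunk below drop the final OS.str() in favor of returning the backing string directly; llvm::raw_string_ostream writes straight into the std::string it wraps, so the explicit flush via str() is redundant here. A standalone illustration of the idiom with a trivial stand-in stream:

    #include <iostream>
    #include <string>

    struct StringStream {            // stand-in for llvm::raw_string_ostream
      std::string &Buffer;           // writes go directly into Buffer
      explicit StringStream(std::string &S) : Buffer(S) {}
      StringStream &operator<<(const std::string &S) {
        Buffer += S;                 // unbuffered: nothing left to flush
        return *this;
      }
    };

    std::string dumpForTests() {
      std::string Dump;
      StringStream OS(Dump);
      OS << "tokens: " << "<...>";
      return Dump;                   // no OS.str() flush step required
    }

    int main() { std::cout << dumpForTests() << '\n'; }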
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
index 1e3a90f3a316..c813865e95cd 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
@@ -263,7 +263,7 @@ std::string syntax::Node::dumpTokens(const SourceManager &SM) const {
OS << " ";
}
});
- return OS.str();
+ return Storage;
}
void syntax::Node::assertInvariants() const {